Python with AI

Python AI brings together the power of Python programming with Artificial Intelligence (AI) capabilities. It's like teaching computers to think and learn like humans do, but using Python's simple and flexible language. With Python AI, developers can create smart programs that can understand language, recognize patterns in data, and even make decisions on their own. Whether you're just starting out or a seasoned programmer, Python AI offers an accessible and exciting way to explore the world of artificial intelligence. 

 NOTE : THE SHARED CODE MAY NOT DISPLAY PROPERLY ON MOBILE DEVICES; KINDLY USE A LAPTOP/PC.

Face Recognition:

Face recognition with Python lets computers detect and identify human faces with remarkable accuracy.
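
Before diving into the full webcam program below, here is a minimal sketch of the core idea behind the face_recognition library: encode two photos and compare them. The file names known.jpg and unknown.jpg are hypothetical placeholders, each assumed to contain exactly one face.

import face_recognition

# Minimal sketch: check whether two photos show the same person.
# "known.jpg" and "unknown.jpg" are hypothetical placeholder file names.
known_image = face_recognition.load_image_file("known.jpg")
unknown_image = face_recognition.load_image_file("unknown.jpg")
known_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

# compare_faces returns one True/False per known encoding
matches = face_recognition.compare_faces([known_encoding], unknown_encoding)
print("Same person!" if matches[0] else "Different people.")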


Source code:

import face_recognition
import cv2
# Note: OpenCV's putText cannot render TTF font files (e.g. a Marathi font); only built-in Hershey fonts are supported
video_capture = cv2.VideoCapture(0)

# Load a sample picture of each known person and compute its face encoding
batman_image = face_recognition.load_image_file(r"D:\images\me.jpg")
batman_face_encoding = face_recognition.face_encodings(batman_image)[0]
divya_image = face_recognition.load_image_file(r"D:\images\D2.jpg")
divya_face_encoding = face_recognition.face_encodings(divya_image)[0]
known_face_encodings = [
    batman_face_encoding,
    divya_face_encoding
]
known_face_names = [
    "BATMAN",
    "DIVYA"
]
face_locations = []
face_encodings = []
face_names = []

process_this_frame = True


while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    frame = cv2.flip(frame, 1)  # mirror the frame for a more natural view

    # Only process every other frame to save CPU
    if process_this_frame:
        # Work on a quarter-size frame for faster face detection
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # face_recognition expects RGB images, but OpenCV captures BGR
        rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    # Scale face locations back up, since detection ran on a 1/4-size frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('a'):  # press 'a' to quit
        break
video_capture.release()
cv2.destroyAllWindows()

 NOTE : KINDLY INSTALL THE REQUIRED LIBRARIES (face_recognition, opencv-python) TO RUN THE CODE SUCCESSFULLY!

In the load_image_file lines above, replace the file paths with the locations of your own images so those faces can be recognized, as shown in the sketch below.
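
For example, a minimal sketch with a placeholder path and a placeholder display name:

# Hypothetical example: point these lines at a photo on your own machine
my_image = face_recognition.load_image_file(r"D:\images\your_photo.jpg")
my_face_encoding = face_recognition.face_encodings(my_image)[0]
known_face_encodings = [my_face_encoding]
known_face_names = ["YOUR NAME"]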

HAND GESTURE VOLUME CONTROL:

This innovative system allows you to control the volume of your devices using simple hand gestures, making it easier and more intuitive to adjust the volume without needing to touch any buttons or controls.

Here's how it works: The system uses a camera to capture real-time images of your hand gestures. These images are then processed using computer vision algorithms to detect and recognize specific hand gestures associated with volume control commands, such as increasing or decreasing the volume.
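
At its heart, the distance-to-volume mapping is just a linear interpolation with numpy. Here is a minimal sketch using the same hand range (30-350 pixels) and volume range (about -63.5 to 0.0 dB) that appear in the full program below; the sample distance is a made-up value:

import numpy as np

finger_distance = 120  # hypothetical pixel distance between thumb and index fingertips
vol_db = np.interp(finger_distance, [30, 350], [-63.5, 0.0])   # interpolated system volume in dB
vol_percent = np.interp(finger_distance, [30, 350], [0, 100])  # same distance as a percentage
print(f"{vol_db:.1f} dB ~ {vol_percent:.0f}%")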

Source code:

import cv2
import mediapipe as mp
from math import hypot
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
import numpy as np

 
cap = cv2.VideoCapture(0) #Checks for camera
 
mpHands = mp.solutions.hands #detects hand/finger
hands = mpHands.Hands()   #initialize hand tracking with the default configuration
mpDraw = mp.solutions.drawing_utils
 
#To access speaker through the library pycaw 
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
volbar=400
volper=0
 
volMin,volMax = volume.GetVolumeRange()[:2]
 
while True:
    success,img = cap.read() #If camera works capture an image
    img = cv2.flip(img, 1)
    imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) #Convert to rgb
    
    #Collection of gesture information
    results = hands.process(imgRGB) #completes the image processing.
 
    lmList = [] #empty list
    if results.multi_hand_landmarks: #list of all hands detected.
        #each entry in the list holds the landmarks for one detected hand
        for handlandmark in results.multi_hand_landmarks:
            for id,lm in enumerate(handlandmark.landmark): #adding counter and returning it
                # Get finger joint points
                h,w,_ = img.shape
                cx,cy = int(lm.x*w),int(lm.y*h)
                lmList.append([id,cx,cy]) #adding to the empty list 'lmList'
            mpDraw.draw_landmarks(img,handlandmark,mpHands.HAND_CONNECTIONS)
    
    if lmList != []:
        #getting the value at a point
                        #x      #y
        x1,y1 = lmList[4][1],lmList[4][2]  #thumb
        x2,y2 = lmList[8][1],lmList[8][2]  #index finger
        #creating circle at the tips of thumb and index finger
        cv2.circle(img,(x1,y1),13,(255,0,0),cv2.FILLED) #image #fingers #radius #rgb
        cv2.circle(img,(x2,y2),13,(255,0,0),cv2.FILLED) #image #fingers #radius #rgb
        cv2.line(img,(x1,y1),(x2,y2),(255,0,0),3)  #create a line b/w tips of index finger and thumb
 
        length = hypot(x2-x1, y2-y1)  # distance b/w tips using hypotenuse
        # np.interp converts the hand range (30-350 px) into the volume range (b/w -63.5 and 0 dB)
        vol = np.interp(length, [30, 350], [volMin, volMax])
        volbar = np.interp(length, [30, 350], [400, 150])
        volper = np.interp(length, [30, 350], [0, 100])
        
        
        print(vol,int(length))
        volume.SetMasterVolumeLevel(vol, None)
        
        # Hand range 30 - 350
        # Volume range -63.5 - 0.0
        #creating volume bar for volume level 
        cv2.rectangle(img,(50,150),(85,400),(0,0,255),4) # vid ,initial position ,ending position ,rgb ,thickness
        cv2.rectangle(img,(50,int(volbar)),(85,400),(0,0,255),cv2.FILLED)
        cv2.putText(img,f"{int(volper)}%",(10,40),cv2.FONT_ITALIC,1,(0, 255, 98),3)
        #tell the volume percentage ,location,font of text,length,rgb color,thickness
    cv2.imshow('Image',img) #Show the video 
    if cv2.waitKey(1) & 0xff==ord(' '): # press the spacebar to quit
        break
        
cap.release()     #stop cam       
cv2.destroyAllWindows() #close window


NOTE : KINDLY INSTALL THE REQUIRED LIBRARIES (opencv-python, mediapipe, pycaw, comtypes, numpy) TO RUN THE CODE SUCCESSFULLY! PYCAW CONTROLS THE SYSTEM VOLUME ON WINDOWS ONLY.


