''' This program will be the interactive screen for our system '''
import RPi.GPIO as GPIO # For GPIO pins on RPi
import time # Could be used for timeout
import pygame
from pygame.locals import* # For event #MOUSE variables
import os
import cv2
import numpy as np
import dlib
import sys
import pigpio

def draw_buttons( buttons, buttons_rect, font ):
    """Render each button label from `buttons` onto the global screen.

    buttons maps label text -> center position; the rect each label was
    drawn at is stored in `buttons_rect` under the same label, for later
    click/hit testing.
    """
    for label, center_pos in buttons.items():
        rendered = font.render(label, True, WHITE)
        bounds = rendered.get_rect(center=center_pos)
        screen.blit(rendered, bounds)
        buttons_rect[label] = bounds  # remember hit-box for touch handling

def draw_color( my_buttons, buttons_rect, my_font, color):
    """Draw each button as a colored ellipse with its label centered on top.

    Like draw_buttons, but fills an ellipse (the label's bounding rect)
    in `color` behind the text. The ellipse rect is saved into
    `buttons_rect` keyed by the label, for click detection.
    """
    for label, center_pos in my_buttons.items():
        rendered = my_font.render(label, True, WHITE)
        bounds = rendered.get_rect(center=center_pos)
        ellipse_rect = pygame.draw.ellipse(screen, color, bounds)
        screen.blit(rendered, ellipse_rect)
        buttons_rect[label] = ellipse_rect  # hit-box for this button

def angle_write(angle, pwm_pin, gpio=13, freq=50):
    """Convert `angle` (degrees) to a hardware-PWM duty cycle and apply it.

    Servo timing reference: 1 ms pulse ~ -90 deg, 2 ms pulse ~ +90 deg.
    Only hardware PWM (via pigpio) is supported here; software PWM is not.

    Parameters:
        angle: target angle in degrees (expected 0-200 for the full sweep).
        pwm_pin: pigpio.pi() connection object (name kept for compatibility).
        gpio: BCM pin carrying the hardware PWM signal. Default 13 — the
            horizontal gimbal pin, which was previously hard-coded.
        freq: PWM frequency in Hz. Default 50, matching the module global
            this function previously read implicitly.

    Returns:
        The duty cycle as a fraction in [0, 1].
    """
    # Linear map from angle to duty fraction: -90 deg -> 0.125, +90 -> 0.0625
    write_duty = -(0.0625 / 180) * (angle + 90) + 0.125
    # pigpio expresses duty cycle as an integer out of 1,000,000
    pwm_pin.hardware_PWM(gpio, freq, int(write_duty * 1000000))
    return write_duty

def set_motor(motor, CW, speed, pwm_pin):
    """Set one H-bridge motor's direction pins and update its PWM duty.

    motor: (In1, In2) GPIO pin pair; CW: truthy = clockwise rotation;
    speed: duty-cycle percentage; pwm_pin: RPi.GPIO PWM object for this motor.
    """
    # Clockwise drives In1 high / In2 low; counter-clockwise is the reverse.
    high_pin, low_pin = (motor[0], motor[1]) if CW else (motor[1], motor[0])
    GPIO.output(high_pin, GPIO.HIGH)
    GPIO.output(low_pin, GPIO.LOW)

    pwm_pin.ChangeDutyCycle(speed)
    
    
def encode_targets( target_path, detector, shape_predictor, facerec ):
    """Encode every authorized-face image found in `target_path`.

    Each image file is treated as an already-cropped face: the full image
    rectangle is used directly instead of running the detector, so images
    must contain exactly one tightly-cropped face.

    Parameters:
        target_path: directory of images, one per authorized person; the
            filename (minus extension) is used as the person's name.
        detector: dlib face detector (currently unused; kept so the call
            signature stays compatible with existing callers).
        shape_predictor: dlib landmark predictor (5-point).
        facerec: dlib face-recognition model producing 128-D encodings.

    Returns:
        (names, encodings): parallel lists of names and 128-D numpy vectors.
    """
    encodings = []  # renamed from `list`, which shadowed the builtin
    names = []
    for face in os.listdir(target_path):
        print(face)
        img_path = os.path.join(target_path, face)  # computed once, reused below
        img = cv2.imread(img_path)
        print(img.shape)
        # Use the whole image as the face bounding rectangle
        rect = dlib.rectangle(0, 0, img.shape[1], img.shape[0])
        print(f'Processing file {img_path}')
        shape = shape_predictor(img, rect)
        encodings.append(np.array(facerec.compute_face_descriptor(img, shape)))
        names.append(face.split('.')[0])  # name = filename without extension

    return names, encodings
	
# Command line arguments for models
if len(sys.argv) != 5:
    # Usage error: print help, report how many args were actually given,
    # and exit with a NON-ZERO status so callers/shell scripts can detect
    # the failure (the previous bare exit() returned status 0).
    print(
        "Call this program like this:\n"
        "   python3 sec_rob_full.py shape_predictor_5_face_landmarks.dat dlib_face_recognition_resnet_model_v1.dat Auth (debug OR desktop OR pi)\n"
        "You can download a trained facial shape predictor from:\n"
        "    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n")
    print( len(sys.argv) )
    sys.exit(1)


    
# Bounding Box variables Opencv (BGR)
RED = ( 0, 0, 255 )
GREEN = ( 0, 255, 0 )
color = RED # Initialize color
downscale = 0.25 # Frames are scaled by this factor before face detection (speeds processing)
thresh = 0.6 # Max Euclidean distance between 128-D encodings to count as a recognized face

# Set variables to script arguments
predictor_path = sys.argv[1] # Facial Landmark Detection model used for facial recognition model
face_rec_model_path = sys.argv[2] # Facial Recognition Model to encode Face
targets_path = sys.argv[3] # Folder path containing authorized faces
exec_mode = sys.argv[4] # Execution mode: 'debug', 'desktop', or 'pi'

# Load models
detector = dlib.get_frontal_face_detector() # Face detector
sp = dlib.shape_predictor( predictor_path ) # Landmark detector (5-point)
facerec = dlib.face_recognition_model_v1( face_rec_model_path ) # Used to encode detected face


# Compute face encodings for target faces.
# Returns parallel lists: names (taken from filenames) and 128-D encodings.
authorized_name, authorized_encoding = encode_targets( targets_path, detector, sp, facerec)
if( exec_mode == 'debug' ):
    print(authorized_name)

## Motor setup ##
GPIO.setmode(GPIO.BCM) # Use Broadcom Numbering

# Output pin vars
# 19 and 16 carry the motor PWM speed signals (see p_a/p_b below); 4 is the
# driver enable line; 5/6 and 21/20 are the H-bridge direction inputs.
out_pins = [19, 5, 6, 4, 21, 20, 16]
EN_PIN = 4
AI1, AI2 = 5, 6
BI1, BI2 = 21, 20

# Motor vars
freq = 50 # PWM frequency in Hz
MOTOR_A = (AI1, AI2) # Right motor
a_rot = 1        # Motor rotation 0 = ccw, 1 = cw
MOTOR_B = (BI1, BI2) # Left motor
b_rot = 1

# Setup GPIO pins as out
for i in out_pins:
    GPIO.setup(i, GPIO.OUT)
    
# Control speeds of motor (Duty Cycle)
stopped, halfspeed, fullspeed = 0, 50, 100

# Initialize software PWM on the speed pins (first/last of out_pins: 19 and 16)
p_a = GPIO.PWM(out_pins[0], freq)
p_b = GPIO.PWM(out_pins[-1], freq)

GPIO.output(EN_PIN, GPIO.HIGH) # Enable the motor driver

## Gimball Setup ##
gim_pwm_pin = 13 # Horizontal gimbal servo (hardware PWM pin)
freq= 50 # NOTE(review): re-assigns the motor-section `freq` to the same value (50 Hz)
#vert_gim_pwm_pin = 18 # Controls camera angle
#vert_dc = 7.8125 # Duty cycle corresponding to some angle (here 7.8125 = 45)
vert_gim_pwm_pin = 12 # Hardware pwm

#Setup gimbal pins
#GPIO.setup(vert_gim_pwm_pin, GPIO.OUT)
#vert_p = GPIO.PWM(vert_gim_pwm_pin, freq)

pi_hw = pigpio.pi() # Connect to the pigpio daemon for hardware PWM control

# pigpio expresses duty as an integer out of 1,000,000 (93750 = 9.375%)
pi_hw.hardware_PWM(gim_pwm_pin, freq, 93750)
pi_hw.hardware_PWM(vert_gim_pwm_pin, freq, 86800) # Duty cycle corresponding to some angle (here 7.8125*10^4 = 45)
angle = 20 # Current horizontal gimbal angle in degrees (sweep range 0-200)
inc = 1 # Degrees moved per main-loop iteration
flip_dir = True # True = angle decrements, False = increments (see main loop)

## Button Setup ##
buttons = [27] # BCM pins of physical buttons (only the bail-out button here)
QUIT = buttons[0]

for i in buttons:
    GPIO.setup(i, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Pull-up: pressed reads LOW

print("Running OpenCV version: " + str( cv2.__version__)) #4.5.2


if( exec_mode == 'pi' ): # Change PyGame screen to run on PiTFT for real world execution
    os.putenv('SDL_VIDEODRIVER', 'fbcon') #Display on piTFT
    os.putenv('SDL_FBDEV','/dev/fb0') # PiTFT, change frame buffer if monitor plugged in
os.putenv('SDL_MOUSEDRV', 'TSLIB') #track mouse clicks on piTFT
os.putenv('SDL_MOUSEDEV','/dev/input/touchscreen')

# Global variables
start_time = time.monotonic() # Reference point for the main loop's timeout

# begin pygame and set mouse visibility to 0
pygame.init()

pygame.mouse.set_visible(0) # Set mouse invisible on screen

# Variables for workspace (pygame colors are RGB, unlike the BGR tuples above)
WHITE = 255, 255, 255
BLACK = 0, 0, 0
RED_PYGAME = 255, 0, 0
width = 320
height = 240
size = (width, height) # PiTFT display resolution
screen = pygame.display.set_mode(size)

## Pygame Variables ##
def draw_text( text, text_pos, font, color ):
    """Blit each string in `text` centered at the matching `text_pos` entry.

    `text` and `text_pos` are parallel lists; each string is rendered in
    `font`/`color` and drawn on the global screen.
    """
    for idx, line in enumerate(text):
        rendered = font.render(line, True, color)
        bounds = rendered.get_rect(center=text_pos[idx])
        screen.blit(rendered, bounds)

# Font and button variables (Phase 0)
title_font = pygame.font.Font(None, 35)
start_button = {'Start Camera':(140,100)} # label text -> center position
quit_button = {'Quit':(280,200)}

button_rects = {} # Place to store buttons drawn in pygame (label -> rect)


vid_font = pygame.font.Font(None, 16)
# Placeholder labels for up to 3 simultaneously displayed faces
vid_name = ['Name Placeholder', 'Name Placeholder', 'Name Placeholder' ]
vid_auth = ['Authorization', 'Authorization', 'Authorization' ]
step = int ( ( width - (40*2) ) / len(vid_name) ) # Horizontal spacing between labels
print(f'Step Size {step}')

vid_name_pos = [ (40 + i*step, 210) for i in range(len(vid_name)) ]
vid_auth_pos = [ (40 + i*step, 230) for i in range( len(vid_name) ) ]

intruder_font = pygame.font.Font(None, 75)
intruder_text = ['INTRUDER', 'ALERT']
intruder_pos = [(width / 2, height / 2 - 25), ( width / 2, height / 2 + 25 )]
# Add text to the screen
#draw_text( vid_text, vid_pos, vid_font )
draw_text( vid_name, vid_name_pos, vid_font, WHITE )
draw_text( vid_auth, vid_auth_pos, vid_font, WHITE ) 
      
## Camera Setup
# frame size should be 320 x ( height - text_height )
video_width = width
video_height = 200

# Create a VideoCapture class object to stream video from Pi camera
videoCap = cv2.VideoCapture(0)

# Use isOpened() to check if VideoCapture class object is created successfully
if( not videoCap.isOpened()):
    print("Error: Can't find Pi Camera")
    quit()
else:
    print( "Success: Pi Camera is open")

# Use get() to confirm position
videoWidth = videoCap.get(cv2.CAP_PROP_FRAME_WIDTH)
videoHeight = videoCap.get(cv2.CAP_PROP_FRAME_HEIGHT)
print("Video resolution: " + str( int(videoWidth) )
      + "x" + str( int( videoHeight ) ) )

## Boundary variables
# Two vertical check columns at +/-10% of frame width around the center;
# the color-detection sweep tests these for the target color.
center = int( videoWidth / 2 )
left_bound = center - int( videoWidth * 0.1 )
right_bound = center + int( videoWidth * 0.1 )

left_found = False # Set when target color appears at the left boundary column
right_found = False # Same for the right boundary column

# Color Detection Variables
H_l = int( 179 / 360 * 155 ) # Lower Hue (155 deg scaled to OpenCV's 0-179 hue range)
H_u = int( 179 / 360 * 210 ) # Upper Hue 
S = int( 255 * 0.35 ) # Saturation 
V = int( 255 * 0.25 ) # Value
# Target color is green
lower_green = np.array( [H_l, S, V] ) # Lower HSV Threshold
upper_green = np.array( [ H_u, 255, 255 ] ) # Upper-HSV Threshold

## First read from the camera. Used to speed up testing
#returnBool, frame = videoCap.read()
#key = cv2.waitKey(1) & 0xFF
#surf = 0
#if returnBool:
  #surf = pygame.surfarray.make_surface( frame )
#else:
  #print( "Camera Error" )
  #quit()

# Draw Title Screen
# Create title screen buttons
draw_color( start_button, button_rects, title_font, GREEN )
draw_color( quit_button, button_rects, title_font, RED_PYGAME )

pygame.display.flip()

# Loop variables
is_quit = False
show_frame = True # Facial recognition runs only on alternating frames

# Facial recognition variables
bbs = [] # Holds Bounding Boxes for detected faces
names = [] # Holds names for faces detected 'Unknown' If not recognized

# Start PWM for motors (initially stopped, 0% duty)
p_a.start(stopped)
p_b.start(stopped)

# Target detection loop variable
rotate_frames = 0 #Number of frames to rotate
start_sweep = False #Flag set when color detection sweep begins
stop_sweep = False #Flag Set when color detection sweep stops

# PyGame Phase flag
phase = 0 # 0 = title, 1 = Video screen

''' Below is for capturing framerate '''
# FPS variables
#frames = 0
#start = time.monotonic() 

# Main control loop. Phase 0 shows the title screen; phase 1 streams video,
# sweeps the gimbal, drives the motors toward a green target, and runs
# facial recognition on every other frame.
while( not is_quit ):
    
    # PyGame Touch Control Logic
    for event in pygame.event.get():
        # FIX: compare event types with == ; `is` on ints only works by
        # CPython interning accident and is not a guaranteed equality test.
        if( event.type == MOUSEBUTTONDOWN ):
            pos = pygame.mouse.get_pos()
        elif( event.type == MOUSEBUTTONUP ):
            # Get mouse position
            pos = pygame.mouse.get_pos()
            # Logic for button presses
            for( my_text, rect) in button_rects.items():
                if( rect.collidepoint(pos) ):
                    if(my_text == 'Quit'):
                        print("Quit Game")
                        is_quit = True
                    if( my_text == 'Start Camera' ):
                        print(" Start ")
                        phase = 1
                        
    if( phase == 0 ):
        # Clear screen and redraw the title buttons
        screen.fill( BLACK )
        draw_color( start_button, button_rects, title_font, GREEN )
        draw_color( quit_button, button_rects, title_font, RED_PYGAME )
        pygame.display.flip()
    if( phase == 1 ):
        returnBool, frame = videoCap.read()
        
        # FIX: check the read result BEFORE using the frame. On a failed read
        # `frame` is None (cv2.rotate would raise), and the original code
        # printed an undefined name `error`, which raised NameError.
        if( not returnBool ):
            print("Error: camera frame read failed")
            break
        
        frame = cv2.rotate( frame, cv2.ROTATE_180) #Rotate to orient image correctly
        
        # Color-detection sweep runs only while the gimbal is near forward
        if (angle > 95 and angle < 105 ):
            if( angle == 96 or angle == 104 ): #Either beginning or end of scan
                # Start scan if the flag is not set
                if( not start_sweep ):
                    start_sweep = True
                else: #Scan had started, thus this is the end of the scan
                    stop_sweep = True
            ## Process frame to find green
            # Convert BGR to HSV
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV )
            # Create mask
            mask = cv2.inRange( hsv, lower_green, upper_green )
            #Check bounds
            for row in range(int(videoHeight)): # Check 6 pixel wide boundary on the left and right
                if( 255 in mask[row][left_bound-3:left_bound+3] ):
                    left_found = True
                if( 255 in mask[row][right_bound-3:right_bound+3] ):
                    right_found = True
            # Debug functions to confirm video capture
            if( exec_mode == 'debug' ):
                # Draw bounds on frame
                img = frame
                for row in range(int(videoHeight)-1):
                    img[row][left_bound-3:left_bound+3] = 0
                    img[row][right_bound-3:right_bound+3] = 0
                cv2.imshow('frame', img)  
                cv2.imshow('mask', mask)
                cv2.waitKey(1)
        
        ## Motor control ##
        if( start_sweep and stop_sweep ): # Check when the camera is facing forward
            if(exec_mode == 'debug'):
                print(angle, left_found, right_found )
            rotate_frames = 0
            if( left_found and right_found ): #Target centered stop
                set_motor( MOTOR_A, 0, stopped, p_a )
                set_motor( MOTOR_B, 0, stopped, p_b )
            elif( left_found ): #Pivot left
                set_motor( MOTOR_A, 1, halfspeed, p_a )
                set_motor( MOTOR_B, 1, halfspeed, p_b )
            elif( right_found ): #Pivot right
                set_motor( MOTOR_A, 0, halfspeed, p_a )
                set_motor( MOTOR_B, 0, halfspeed, p_b )
            else:
                # No target seen on either boundary: keep rotating to search
                set_motor( MOTOR_A, 1, halfspeed, p_a )
                set_motor( MOTOR_B, 1, halfspeed, p_b )
            # Reset Flags
            left_found = False
            right_found = False
            start_sweep = False
            stop_sweep = False
        
        
        if( rotate_frames == 10 ): # Stop rotation after a few frames
            set_motor( MOTOR_A, 0, stopped, p_a )
            set_motor( MOTOR_B, 0, stopped, p_b )
        
        # Update rotate_frames variable (saturates at 10)
        rotate_frames = rotate_frames if rotate_frames == 10 else rotate_frames + 1
            
        # Process every other frame
        if show_frame :
            # Perform facial recognition
            # Shrink frame to process and make processing easier
            frame_small = cv2.resize( frame, (0, 0), fx= downscale, fy = downscale )

            # Find bounding boxes of faces
            dets = detector( frame_small, 1 )
            
            #Reset names list
            names = []
            
            # Create bounding boxes on all detected faces, scaled back up to
            # full-frame coordinates (detection ran on the downscaled frame)
            bbs = [ [  ( int(box.left()*(1 / downscale)) , int( box.top() * ( 1 / downscale ) ) ),  ( int( box.right() * (1 / downscale) ), int( box.bottom() * (1 / downscale) ) ) ] for box in dets ]
            # Classify each face detected
            for box in dets:
              # Find shape and encode 
              shape = sp( frame_small, box )
              unknown_encode = np.array( facerec.compute_face_descriptor( frame_small, shape ) ) # Encode face
              name = 'unknown' # Face is initally unknown
              
              # Determine if found face is authorized
              # Calculate Euclidean distance between found faces and known faces
              distances = [ np.linalg.norm( unknown_encode - auth ) for auth in authorized_encoding ]
              
              # Different distance algorithm reducing operations on the distances could be used
              
              # Use smallest distance value (FIX: renamed from `min`, which
              # shadowed the builtin)
              best = np.argmin( distances )
              if distances[best] < thresh: # If distance below threshold, face recognized 
                name = authorized_name[best]
                
              names.append( name ) # Add name to list
        
        #Update name and authorization Show the first 3 faces
        num_names = len(names)
        
        for i in range(len(vid_name)):
            vid_name[i] = names[i] if num_names > i else '' #Update name
            if vid_name[i] != '':
                vid_auth[i] = 'Authorized' if vid_name[i] != 'unknown' else 'INTRUDER'
            else:
                vid_auth[i] = ''
            
        # Draw bounding boxes on frame
        for i, box in enumerate(bbs):
          # Determine color RED for unknown GREEN for authorized people
          color = RED if names[i] == 'unknown' else GREEN 
          frame = cv2.rectangle( frame, box[0], box[1], color , 2 )            
                
        # Process frame for pygame
        frame = cv2.cvtColor( frame, cv2.COLOR_BGR2RGB ) # Convert color_scale
        # Convert frame to pygame Image
        surf = pygame.surfarray.make_surface( frame ) # Make frame a pygame Surface
        surf = pygame.transform.scale( surf, (video_height, video_width) ) # Scale image
        # Rotate to correct orientation
        surf = pygame.transform.rotate( surf, -90 )
            
        # Clear screen
        screen.fill( BLACK )
        
        # Update screen
        screen.blit( surf, (0,0) ) # Draw the video frame
        draw_text( vid_name, vid_name_pos, vid_font, WHITE ) # Draw name text
        draw_text( vid_auth, vid_auth_pos, vid_font, WHITE ) # Draw authorization text
        if( 'unknown' in names ):
            draw_text( intruder_text, intruder_pos, intruder_font, RED_PYGAME )
        pygame.display.flip()
        
        # Flip show_frame variable
        show_frame = not show_frame
        
        ## Gimbal movement ##
        # Flip direction if gimbal rotated completely in a direction
        if (angle > 200):
            flip_dir = True
        elif (angle < 0):
            flip_dir = False
        
        # Increment/Decrement gimbal angle to scan environment
        if(flip_dir):
            angle = angle - inc
        else:
            angle = angle + inc
        
        duty_cycle = angle_write(float(angle), pi_hw) # Change PWM duty cycle to scan
        
        ''' Below is for getting framerate of program '''
        # FPS Counter
        #frames += 1
        #if( time.monotonic() - start > 1 ):
            #print(f'fps = {frames}')
            #frames = 0
            #start = time.monotonic()
        
    # Timeout: bail out after 6 minutes of runtime
    if ( time.monotonic() - start_time >= 360):
        break
    
    # Bailout button (active-low: pressed reads 0)
    if ( not GPIO.input(QUIT) ):
        print("Quit Game")
        is_quit = True
        break
    
# Shutdown / cleanup: stop all actuators, release hardware handles.
p_a.stop() # Stop software PWM threads before releasing GPIO
p_b.stop()
pi_hw.hardware_PWM(13, 0, 0) # Turn off hardware PWM (horizontal gimbal)
pi_hw.hardware_PWM(12, 0, 0) # Turn off hardware PWM (vertical gimbal)
pi_hw.stop() # Close connection to the pigpio daemon
GPIO.output(EN_PIN, GPIO.LOW) # Disable the motor driver enable line
GPIO.cleanup() # Reset all GPIO channels used by this script
pygame.quit() # Quit pygame
videoCap.release() # Release camera (idiomatic instance call, was an unbound class-method call)
    


