Skip to content

Functions

find_task_by_id_function(id)

Finds a task by its ID.

Parameters:

Name Type Description Default
id int

The ID of the task to find.

required

Returns:

Name Type Description
str

The details of the task, or None if not found.

Examples:

>>> find_task_by_id_function(1)
"Task Details\nClean the kitchen\nIMPORTANT TASK CREATION TIME: 2020-07-01T12:00:00"
Source code in backend/Multi-Sensory Virtual AAGI/functions/find_task_by_id.py
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
def find_task_by_id_function(id):
    """
    Look up a task by its ID and format its details.

    Args:
      id (int): The ID of the task to find.

    Returns:
      str: The formatted task details, or None if no task matches.

    Examples:
      >>> find_task_by_id_function(1)
      "Task Details\\nClean the kitchen\\nIMPORTANT TASK CREATION TIME: 2020-07-01T12:00:00"
    """
    with open('state_of_mind/task_list.json') as task_file:
        task_data = json.load(task_file)

    # First task whose id matches, or None if the list is exhausted.
    match = next((entry for entry in task_data['tasks'] if entry['id'] == id), None)
    if match is None:
        return None
    return "Task Details\n" + match['task'] + "\nIMPORTANT TASK CREATION TIME: " + match['task_created_time']

create_task_function()

Creates a task function for an AI assistant.

Returns:

Name Type Description
str

A string containing the task description, goals, and start time.

Examples:

>>> create_task_function()
"You are Alex an AI assistant that uses a very Large Language Model.
The user is asking you to perform a task. Write a small description of the task along with the expected goals and any additional information that would be helpful for someone performing this task who has no knowledge of the prior conversation. Clearly mention the start time for the task at the end."
Source code in backend/Multi-Sensory Virtual AAGI/functions/create_task.py
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
def create_task_function():
    """
    Creates a task description for an AI assistant from the current conversation.

    Args:
      None

    Returns:
      str: A string containing the task description, goals, and start time.

    Examples:
      >>> create_task_function()
      "You are Alex an AI assistant that uses a very Large Language Model.
      The user is asking you to perform a task. Write a small description of the task along with the expected goals and any additional information that would be helpful for someone performing this task who has no knowledge of the prior conversation. Clearly mention the start time for the task at the end."
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # Collect every ability name into a single comma-separated line.
    with open(os.path.join(STATE_DIR,'abilities.json'), 'r') as abilities_file:
        abilities_json = json.load(abilities_file)
    ability_names = "\nTools the assistant can access are " + ', '.join([entry['name'] for entry in abilities_json['abilities']])

    # Flatten the stored conversation into one newline-separated transcript.
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as conversation_file:
        conversation_json = json.load(conversation_file)

    transcript_lines = []
    for entry in conversation_json['conversation']:
        line = entry['sender'] + ': ' + entry['message']
        if entry['file_upload'] != 'none':
            line += '\nFile Uploaded by ' + entry['sender'] + ": " + entry['file_upload']
        transcript_lines.append(line + '\n')
    conversation_str = ''.join(transcript_lines)

    intro = "You are Alex an AI assistant that uses a very Large Language Model."

    instructions = "\nThe user is asking you to perform a task. Write a small description of the task along with the expected goals and any additional information that would be helpful for someone performing this task who has no knowledge of the prior conversation. Clearly mention the start time for the task at the end.\n"

    # Single human message: intro + transcript + tool list + instructions.
    prompt_text = intro + conversation_str + ability_names + instructions
    chat_prompt = ChatPromptTemplate.from_messages(
        [HumanMessagePromptTemplate.from_template(prompt_text)])
    return chat(chat_prompt.format_prompt().to_messages()).content

random_thought_function()

Generates a random thought for the AI assistant Alex.

Returns:

Name Type Description
str

The random thought generated for Alex.

Side Effects

Loads environment variables from the .env file. Reads from the STATE_DIR environment variable. Reads from the personality.txt, thought_bubble.txt, curiosity.txt, creativity.txt, fear.txt, happiness.txt, sadness.txt, and anger.txt files.

Examples:

>>> random_thought_function()
"What if we could go back in time and watch Leonardo da Vinci paint the Mona Lisa or witness the construction of the pyramids in ancient Egypt? And what about the future?"
Source code in backend/Multi-Sensory Virtual AAGI/functions/random_thought.py
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
def random_thought_function():
    """
    Generates a random thought for the AI assistant Alex.

    Assembles a prompt from Alex's personality, current thought bubble, and
    emotion parameters (all read from files under STATE_DIR), then asks the
    chat model to produce the thought.

    Args:
      None

    Returns:
      str: The random thought generated for Alex.

    Side Effects:
      Reads personality.txt, thought_bubble.txt, curiosity.txt,
      creativity.txt, fear.txt, happiness.txt, sadness.txt, and anger.txt
      from STATE_DIR.

    Examples:
      >>> random_thought_function()
      "What if we could go back in time and watch Leonardo da Vinci paint the Mona Lisa or witness the construction of the pyramids in ancient Egypt? And what about the future?"
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    def _read_state_file(name):
        # Context manager so the handle is closed promptly; the original
        # bare open(...).read() calls leaked the file objects.
        with open(os.path.join(STATE_DIR, name), "r") as f:
            return f.read()

    # Fix: the prompt header previously read "Peronsality:" (typo).
    personality = "Personality:\n" + _read_state_file("personality.txt")

    thought_bubble = "\nAlex's thought bubble\n" + _read_state_file("thought_bubble.txt")

    curiosity = _read_state_file("curiosity.txt")
    creativity = _read_state_file("creativity.txt")
    fear = _read_state_file("fear.txt")
    happiness = _read_state_file("happiness.txt")
    sadness = _read_state_file("sadness.txt")
    anger = _read_state_file("anger.txt")

    values_string = "\nAlex Emotion Parameters:\nHappiness: " + happiness + "\nSadness: " + sadness + "\nCreativity: " + creativity + "\nCuriosity: " + curiosity + "\nAnger: " + anger + "\nFear: " + fear 

    info = personality + thought_bubble + values_string

    # human_template / assistant_template are module-level few-shot prompt
    # constants defined elsewhere in this file.
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
    assistant_message_prompt = AIMessagePromptTemplate.from_template(assistant_template)

    human_message_prompt1 = HumanMessagePromptTemplate.from_template(info + "\nIMPORTANT: ALEX IS HAVING A RANDOM THOUGHT NOW\nThought:\n")

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt, assistant_message_prompt, human_message_prompt1])

    return chat(chat_prompt.format_prompt().to_messages()).content

check_success_function(python_script, information, task_details)

Checks if a given task was successful.

Parameters:

Name Type Description Default
python_script str

The Python script to be evaluated.

required
information str

The output of the Python script.

required
task_details str

The details of the task.

required

Returns:

Name Type Description
tuple

A tuple containing a boolean value and a string. The boolean value indicates if the task was successful, and the string contains the reason for the choice.

Examples:

>>> check_success_function("from ability_functions.send_email import send_email_function\ndef function(text, receiver):\n    send_email_function(text, receiver)\n    return "success"\nresponse = function("Hi, i have sent the refund to you!", "Bell")\nwith open("tempfiles/output2792.txt", "w") as f:\n    f.write("Result " + response)", "success", "Task details: Send an email to Bell telling him you have sent the refund.")
(True, "The task was successfully completed, Bell has received an email stating that we have sent the refund to him.")
Source code in backend/Multi-Sensory Virtual AAGI/functions/check_success.py
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
def check_success_function(python_script, information, task_details):
    """
    Asks the chat model to judge whether a completed task was successful.

    Builds a few-shot prompt: one worked example (an email-sending script,
    its output, and the verdict "True" plus a reason), followed by the real
    script, output, and task details. When the model answers "False" no
    reason is requested; otherwise a follow-up message asks it to justify
    the verdict.

    Args:
      python_script (str): The Python script that was executed for the task.
      information (str): The output produced by running the script.
      task_details (str): The details of the task.

    Returns:
      tuple: (check, reason). NOTE: check is the model's verdict as the
      literal string "True" or "False" — not a bool — and reason is the
      model's explanation, or "" when the verdict is "False".

    Examples:
      >>> check_success_function(script_text, "success", "Task details: Send an email to Bell telling him you have sent the refund.")
      ("True", "The task was successfully completed, Bell has received an email stating that we have sent the refund to him.")
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)
    instruction1 = "If the task was succesful then output True, if not then False."
    instruction2 = "Give the reason for the choice"
    # Few-shot example script, kept verbatim (including its odd indentation).
    python_script_template = """from ability_functions.send_email import send_email_function
    def function(text, receiver):
        send_email_function(text, receiver)
        return "success"
    response = function("Hi, i have sent the refund to you!", "Bell")
    with open("tempfiles/output2792.txt", "w") as f:
        f.write("Result " + response)
    """
    information_template = "success"
    task_details_template = "Task details: Send an email to Bell telling him you have sent the refund."

    # Example turn: script + output + task details, asking for True/False.
    human_template = "Python Code\n" + python_script_template + "\nCode Output\n" + information_template + "\n" + task_details_template + "\n" + instruction1
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

    assistant_template = "True"
    assistant_message_prompt = AIMessagePromptTemplate.from_template(assistant_template)

    human_message_prompt1 = HumanMessagePromptTemplate.from_template(instruction2)

    assistant_template1 = "The task was successfully completed, Bell has received an email stating that we have sent the refund to him."
    assistant_message_prompt1 = AIMessagePromptTemplate.from_template(assistant_template1)

    # Real query: the actual script, its output, and the task details.
    human_template2 = "Python Code\n" + python_script + "\nCode Output\n" + information + "\n" + task_details + "\n" + instruction1
    human_message_prompt2 = HumanMessagePromptTemplate.from_template(human_template2)

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt, assistant_message_prompt, human_message_prompt1, assistant_message_prompt1, human_message_prompt2])
    check = chat(chat_prompt.format_prompt().to_messages()).content

    # NOTE(review): an answer like "False." or "false" slips past this exact
    # string comparison and falls through to the reason request — confirm
    # that is acceptable for the caller (perform_task checks == 'False' too).
    if check == "False":
        return check, ""

    # Append the model's own verdict, then ask it to justify the choice.
    assistant_message_prompt2 = AIMessagePromptTemplate.from_template(check)
    human_message_prompt3 = HumanMessagePromptTemplate.from_template(instruction2)

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt, assistant_message_prompt, human_message_prompt1, assistant_message_prompt1, human_message_prompt2, assistant_message_prompt2, human_message_prompt3])
    response = chat(chat_prompt.format_prompt().to_messages()).content

    return check, response

perform_task_function(id)

Performs a task given an id.

Parameters:

Name Type Description Default
id int

The id of the task to be performed.

required

Returns:

Name Type Description
tuple

A tuple containing the status of the task, the number of seconds to wait for, and the response.

Examples:

>>> perform_task_function(1)
('done', 0, 'The user has sent an email to John.')
Source code in backend/Multi-Sensory Virtual AAGI/functions/perform_task.py
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
def perform_task_function(id):
    """
    Performs a scheduled task, waiting until its start time if necessary.

    First asks the chat model (with few-shot timing examples) whether the
    task's start time has been reached. If not, returns a 'wait' status with
    the number of seconds to sleep. Otherwise generates a python script for
    the task, installs its requirements, runs it (retrying on errors), and
    checks whether the task succeeded; if no script is needed it falls back
    to talk_function.

    Args:
      id (int): The id of the task to be performed.

    Returns:
      tuple: (status, seconds, response) — status is 'wait' or 'done',
      seconds is how long to wait (0 when done), and response is the model's
      summary ("" when waiting).

    Examples:
      >>> perform_task_function(1)
      ('done', 0, 'The user has sent an email to John.')
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # create conversation string, each dialogue separated by new line
    # NOTE(review): conversation_str is built but never used below — confirm
    # whether it was meant to be included in one of the prompts.
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as f:
        data = json.load(f)

    conversation_str = ''
    for message in data['conversation']:
        conversation_str += message['sender'] + ': ' + message['message']
        if message['file_upload'] != 'none':
            conversation_str += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        conversation_str += '\n'

    task_details = find_task_by_id_function(id)
    # Round-trip now() through the timestamp format, then render it in the
    # '%Y-%m-%d %H:%M:%S' form used by the prompts below.
    timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    dt_obj = datetime.datetime.strptime(timestamp, '%Y_%m_%d_%H_%M_%S')
    # convert datetime object to desired format
    formatted_dt = dt_obj.strftime('%Y-%m-%d %H:%M:%S')

    instructions = "\nIf the task has a start time, and current time still hasn't reached that time then output the number of seconds to wait for. If the current time has crossed the start time given in the task then output True. ( 1 minute is 60, 5 minutes is 300, 10 minutes is 600 ) DO NOT OUTPUT True IF THE CURRENT TIME HAS NOT CROSSED THE START TIME. ONLY OUTPUT A NUMBER."

    # Few-shot examples teaching the model to answer either "True" or a
    # number of seconds to wait.
    human_template = """Task Details:
Task: The user is requesting to provide a guide in making chocolate chip cookies by 10am. The expected goal is to provide a clear and concise explanation of the steps involved in making chocolate chip cookies, including the ingredients and equipment needed. The additional information that would be helpful for someone performing this task who has no knowledge of the prior conversation is to provide any tips or tricks for making the perfect chocolate chip cookies, such as how to measure ingredients accurately and how to properly mix the dough.\nIMPORTANT TASK CREATION TIME: 2023-04-13 9:47:56\n\nCURRENT TIME: 2023-04-13 09:50:00\nIf current time has crossed start time output True, if it hasn't then output the number of seconds to wait for.""" 
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

    assistant_template = "600"
    assistant_message_prompt = AIMessagePromptTemplate.from_template(assistant_template)

    human_template1 = """Task Details:
Task: The user has given a task to send an email to john at 9am. The expected goal is to send an email to John at 9.\nIMPORTANT TASK CREATION TIME: 2023-04-13 08:30:00\nCURRENT TIME: 2023-04-13 08:58:00\nIf current time has crossed start time output True, if it hasn't then output the number of seconds to wait for.""" 
    human_message_prompt1 = HumanMessagePromptTemplate.from_template(human_template1)

    assistant_template1 = "120"  
    assistant_message_prompt1 = AIMessagePromptTemplate.from_template(assistant_template1)

    human_template2 = """Task Details:
Task: The user has given a task to make a report about pandas after 6pm. The expected goal is to make a report after 6pm.\nIMPORTANT TASK CREATION TIME: 2023-04-13 17:46:10\nCURRENT TIME: 2023-04-13 18:18:00\nIf current time has crossed start time output True, if it hasn't then output the number of seconds to wait for.""" 
    human_message_prompt2 = HumanMessagePromptTemplate.from_template(human_template2)

    assistant_template2 = "True"  
    assistant_message_prompt2 = AIMessagePromptTemplate.from_template(assistant_template2)

    human_template3 = """Task Details:
Task: The user has given a task to send a joke after 7am. The expected goal is to send a joke to user after 7.\nIMPORTANT TASK CREATION TIME: 2023-04-13 06:34:20\nCURRENT TIME: 2023-04-13 06:57:00\nIf current time has crossed start time output True, if it hasn't then output the number of seconds to wait for."""
    human_message_prompt3 = HumanMessagePromptTemplate.from_template(human_template3)

    assistant_template3 = "180"  
    assistant_message_prompt3 = AIMessagePromptTemplate.from_template(assistant_template3)

    # Real query: this task's details plus the current time.
    human_template4 = task_details + "\nCURRENT TIME: " + formatted_dt + instructions 
    human_message_prompt4 = HumanMessagePromptTemplate.from_template(human_template4)

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt, assistant_message_prompt, human_message_prompt1, assistant_message_prompt1, human_message_prompt2, assistant_message_prompt2, human_message_prompt3, assistant_message_prompt3, human_message_prompt4])
    response = chat(chat_prompt.format_prompt().to_messages()).content
    print(response)
    # NOTE(review): int(response) raises ValueError if the model outputs
    # anything other than "True" or a bare integer — confirm callers can
    # tolerate that.
    if response != "True":
        return 'wait', int(response), ""

    # Retry loop: regenerate and re-run the script until check_success
    # reports anything other than 'False'. NOTE(review): unbounded — a task
    # that can never succeed loops forever.
    while True:
        script_flag = 0
        python_script = create_python_script_task_function(id)
        print("Python Script is\n" + python_script)

        # 'none' means the task needs no script; answer conversationally.
        if python_script == 'none':
            script_flag = 1
            print('script_flag set to 1')

        if script_flag == 0:
            requirements = create_requirements_function(python_script)
            print("Requirements is\n" + requirements)
            if requirements == "empty":
                requirements = ""

            # write python file
            with open(f"tempfiles/python_script{id}.py", "w") as file:
                # Write the text to the file
                file.write(python_script)

            # write requirements file
            with open(f"tempfiles/requirements{id}.txt", "w") as file:
                # Write the text to the file
                file.write(requirements)

        # Install requirements and run the generated script, asking the
        # model to patch the files whenever the shell command fails.
        while script_flag == 0:
            # Run multiple commands in succession
            # NOTE(review): shell=True with a fixed command string; the only
            # variable parts come from the numeric task id, but verify.
            commands = f'conda activate aagi && pip install -r tempfiles/requirements{id}.txt && python tempfiles/python_script{id}.py'
            result = subprocess.run(commands, capture_output=True, shell=True, universal_newlines=True)
            print("Commands finished running")
            if result.returncode == 0:
                print("Shell output:", result.stdout)
                break
            else:
                print("Error executing the command:", result.stderr)
                handle_error_function(python_script , requirements, result.stderr, id)

        information = ""
        if script_flag == 0:
            # Open the file for reading
            with open(f'tempfiles/output{id}.txt', 'r') as file:
                # Read the entire contents of the file
                information = file.read()
                print("Extra Information is\n" + information)

            # check is the literal string "True"/"False" from check_success.
            check , response = check_success_function(python_script, information, task_details)
            if check == 'False':
                continue
            return 'done', 0, response

        response = talk_function(id)
        return 'done', 0, response

create_python_script_function(id)

Creates a python script based on the conversation and the tools available.

Parameters:

Name Type Description Default
id str

The id of the conversation.

required

Returns:

Name Type Description
str

The python script.

Examples:

>>> create_python_script_function("2020_08_20_12_00_00")
"import os\nimport sys\n# Get the absolute path to the current directory\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n# Add the path to the root directory\nsys.path.append(os.path.join(current_dir, '..'))\n# Import the search_function from the search module, You need to append _function to the name of the tool\nfrom ability_functions.search import search_function\ndef python_function(text):\n    # search for relevant information on this topic\n    search_response = search_function(text)\n    # create an instructions that tells the natural language function to extract the search response and frame it as question\n    instructions = "Create a question in words that tells to divide the total amount spent by 10.\nYou can find the total amount spent by analyzing this piece of text\n" + search_response\n    # get the question\n    question = natural_language_task_function(instructions)\n    # pass the question to the calculator function to get the answer\n    answer = calculator_function(question)\n    # write the result to tempfiles/output{id}.txt file\n    with open("tempfiles/output{id}.txt", "w") as f:\n        f.write("Search response was " + search_response)\n        f.write("After Computation the answer is " + str(answer))\n\n#call the function\npython_function("Total Cost USA Latest Semiconductor Bill")"
Source code in backend/Multi-Sensory Virtual AAGI/functions/create_python_script.py
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
def create_python_script_function(id):
    """
    Creates a python script based on the conversation and the tools available.

    Builds a few-shot prompt from the assistant's personality, the ability
    descriptions, and the stored conversation, then asks the chat model to
    emit a runnable python script for the current request.

    Args:
      id (str): The id of the conversation; also a timestamp in
        '%Y_%m_%d_%H_%M_%S' format used to render the current time.

    Returns:
      str: The python script generated by the chat model.

    Examples:
      >>> create_python_script_function("2020_08_20_12_00_00")
      "import os\\nimport sys\\n..."
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # Fix: read personality with a context manager so the file handle is
    # closed (the original bare open(...).read() leaked it).
    with open(os.path.join(STATE_DIR, "personality.txt")) as f:
        personality = f.read()

    # Load the ability JSON file
    with open(os.path.join(STATE_DIR,'abilities.json'), 'r') as f:
        abilities_data = json.load(f)

    abilities = "Tools: \n" + '\n'.join( [ability['name'] + ": " + ability['description'] + "\n" + ability['directions'] for ability in abilities_data['abilities']])

    # create conversation string, each dialogue separated by new line
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as f:
        data = json.load(f)

    conversation_str = ''
    for message in data['conversation']:
        conversation_str += message['sender'] + ': ' + message['message']
        if message['file_upload'] != 'none':
            conversation_str += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        conversation_str += '\n'   

    # Reformat the id timestamp as a human-readable current time.
    dt_obj = datetime.datetime.strptime(id, '%Y_%m_%d_%H_%M_%S')
    # convert datetime object to desired format
    formatted_dt = dt_obj.strftime('%Y-%m-%d %H:%M:%S')

    # import_instructions / instructions and the human/assistant templates
    # below are module-level prompt constants defined elsewhere in this file.
    intial_text = personality + "\n" + abilities + "\n" + import_instructions + "\n" + instructions + "\nCurrent Time: " + formatted_dt + "\n"

    human_template = intial_text
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

    human_message_prompt1 = HumanMessagePromptTemplate.from_template(human_template1)

    assistant_template1 = "none"
    assistant_message_prompt1 = AIMessagePromptTemplate.from_template(assistant_template1)

    human_message_prompt2 = HumanMessagePromptTemplate.from_template(human_template2)

    assistant_message_prompt2 = AIMessagePromptTemplate.from_template(assistant_template2)

    human_message_prompt3 = HumanMessagePromptTemplate.from_template(human_template3)

    assistant_message_prompt3 = AIMessagePromptTemplate.from_template(assistant_template3)

    human_message_prompt4 = HumanMessagePromptTemplate.from_template(human_template4)

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt, human_message_prompt1, assistant_message_prompt1, human_message_prompt2, assistant_message_prompt2, human_message_prompt3, assistant_message_prompt3, human_message_prompt4])
    python_script = chat(chat_prompt.format_prompt(conversation_str=conversation_str, id = id).to_messages()).content
    return python_script

create_requirements_function(script)

Generates a list of packages for requirements.txt based on a given code.

Parameters:

Name Type Description Default
script str

The code to generate the list of packages from.

required

Returns:

Name Type Description
str

A list of packages for requirements.txt, or 'empty' if no packages are needed.

Examples:

>>> create_requirements_function("import numpy as np")
"numpy"
Source code in backend/Multi-Sensory Virtual AAGI/functions/create_requirements.py
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
def create_requirements_function(script):
    """
    Generates a list of packages for requirements.txt based on a given code.

    Args:
      script (str): The code to generate the list of packages from.

    Returns:
      str: A list of packages for requirements.txt, or 'empty' if no packages are needed.

    Examples:
      >>> create_requirements_function("import numpy as np")
      "numpy"
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # Few-shot prompt: two worked examples (the templates and their expected
    # package lists are module-level constants), then the real script.
    message_prompts = [
        HumanMessagePromptTemplate.from_template(human_template),
        HumanMessagePromptTemplate.from_template(human_template1),
        AIMessagePromptTemplate.from_template(packages),
        HumanMessagePromptTemplate.from_template(human_template2),
        AIMessagePromptTemplate.from_template(packages1),
        HumanMessagePromptTemplate.from_template(human_template3),
    ]
    chat_prompt = ChatPromptTemplate.from_messages(message_prompts)
    requirements = chat(chat_prompt.format_prompt(script=script).to_messages()).content

    # Any mention of 'empty' means no third-party packages are required.
    if 'empty' in requirements:
        requirements = 'empty'

    # Local ability modules must never end up in requirements.txt.
    return requirements.replace('ability_functions' , "")

check_values_function()

Checks if all values in the emotion files are 0. If so, generates random values between 0 and 1 and writes them to the emotion files.

Returns:

Type Description

None

Side Effects

Writes new values to the emotion files.

Examples:

>>> check_values_function()
New values have been written to the Emotion files.
Source code in backend/Multi-Sensory Virtual AAGI/functions/check_values.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
def check_values_function():
    """
    Checks if all values in the emotion files are 0. If so, generates random
    values between 0 and 1 (keeping happiness, curiosity and creativity high
    and fear, sadness and anger low) and writes them to the emotion files.

    Args:
      None

    Returns:
      None

    Side Effects:
      May overwrite the six emotion files under STATE_DIR; always prints a
      status message.

    Examples:
      >>> check_values_function()
      New values have been written to the Emotion files.
    """
    # define the path to the directory where the text files are stored
    dir_path = STATE_DIR

    # Emotion name -> (low, high) range used when regenerating its value.
    ranges = {
        "curiosity": (0.7, 1),
        "creativity": (0.7, 1),
        "fear": (0, 0.3),
        "happiness": (0.7, 1),
        "sadness": (0, 0.3),
        "anger": (0, 0.3),
    }

    # Read the current values; context managers fix the file-handle leaks
    # from the original bare open(...).read() calls.
    values = {}
    for name in ranges:
        with open(os.path.join(dir_path, name + ".txt")) as f:
            values[name] = float(f.read())

    # Only reseed when every emotion is exactly 0.
    if any(value != 0 for value in values.values()):
        print("Values in the Emotion files are not all 0.")
        return

    # Generate a fresh random value in each emotion's range and persist it.
    for name, (low, high) in ranges.items():
        with open(os.path.join(dir_path, name + ".txt"), "w") as f:
            f.write(str(random.uniform(low, high)))

    print("New values have been written to the Emotion files.")

handle_error_function(python_script, requirements, error, id)

Handles errors in python scripts and requirements files.

Parameters:

Name Type Description Default
python_script str

The python script to be modified.

required
requirements str

The requirements file to be modified.

required
error str

The error message.

required
id int

The id of the conversation.

required

Returns:

Name Type Description
None

No return value.

Side Effects

Writes modified python script and requirements files to the tempfiles directory.

Examples:

>>> handle_error_function(python_script_template, requirements_template, error_template, 1)
None
Source code in backend/Multi-Sensory Virtual AAGI/functions/handle_error.py
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
def handle_error_function(python_script, requirements, error, id):
    """
    Asks the chat model to repair a failing generated script/requirements pair.

    Builds a few-shot prompt (one hard-coded example exchange plus the real
    error) and overwrites ``tempfiles/python_script{id}.py`` and
    ``tempfiles/requirements{id}.txt`` with the model's corrected versions.

    Args:
      python_script (str): Current contents of the python script to be fixed.
      requirements (str): Current contents of the requirements file to be fixed.
      error (str): The error message produced when running/installing them.
      id (int): The id of the conversation; used to locate the temp files.
        NOTE(review): shadows the ``id`` builtin — consider renaming.
    Returns:
      None: No return value.
    Side Effects:
      Writes modified python script and requirements files to the tempfiles directory.
    Examples:
      >>> handle_error_function(python_script_template, requirements_template, error_template, 1)
      None
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # create conversation string, each dialogue separated by new line
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as f:
        data = json.load(f)

    conversation_str = ''
    for message in data['conversation']:
        conversation_str += message['sender'] + ': ' + message['message']
        if message['file_upload'] != 'none':
            conversation_str += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        conversation_str += '\n'

    # hard-coded example error used as the few-shot demonstration below
    error_template = """ERROR: Could not find a version that satisfies the requirement pnds (from versions: none\nERROR: No matching distribution found for pnds"""

    # few-shot example turn. NOTE(review): python_script_template,
    # requirements_template and modified_text are presumably module-level
    # example fixtures — they are not defined in this function; confirm they
    # exist at module scope.
    human_template1 = "Error has been caused after running either the python_script{id}.py or installing requirements{id}.txt.\n" + error_template + "\nThis is the code in python_script{id}.py:" + python_script_template + "\nThe goal of this python code is to get information that can help in answering the question from the user in this conversation\n" + conversation_str + "\nThis is the text in requirements{id}.txt:" + requirements_template + "Give the modified text for python_script.py and requirements.txt to get rid of this error. DO NOT SAY ANYTHING ELSE. ONLY GENERATE THE PYTHON SCRIPT AND REQUIREMENTS FILE IN THE SPECIFIED FORMAT."
    human_message_prompt1 = HumanMessagePromptTemplate.from_template(human_template1)

    assistant_template1 = modified_text
    assistant_message_prompt1 = AIMessagePromptTemplate.from_template(assistant_template1)

    # the real request: the actual error and current file contents
    human_template2 = "Error has been caused after running either the python_script{id}.py or installing requirements{id}.txt.\n" + error + "\nThis is the code in python_script{id}.py:" + python_script + "\nThis is the code in requirements{id}.txt:" + requirements + "Give the modified text for these files to get rid of this error.  DO NOT SAY ANYTHING ELSE. ONLY GENERATE THE PYTHON SCRIPT AND REQUIREMENTS FILE IN THE SPECIFIED FORMAT."
    human_message_prompt2 = HumanMessagePromptTemplate.from_template(human_template2)

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt1, assistant_message_prompt1, human_message_prompt2])
    response = chat(chat_prompt.format_prompt(id=id).to_messages()).content
    print(response)
    # extract the requirements: first word of each line until the line that
    # starts the python_script section (rebinds the `requirements` parameter)
    requirements = re.findall(r'^\s*(\w+)', response, flags=re.MULTILINE)

    requirements_list = []
    for r in requirements:
        if r == 'python_script':
            break
        requirements_list.append(r)
    requirements_str = "\n".join(requirements_list[1:])  # ignore the first element which is 'requirements.txt'

    # extract the python script
    # NOTE(review): raises AttributeError if the model's reply does not match
    # the expected "python_script{id}.py" header format
    python_script = re.search(r'python_script(?:\d+)?\.py\s*\n(.+)', response, flags=re.DOTALL).group(1)

    # write python file
    with open(f"tempfiles/python_script{id}.py", "w") as file:
        # Write the text to the file
        file.write(python_script)

    # write requirements file
    with open(f"tempfiles/requirements{id}.txt", "w") as file:
        # Write the text to the file
        file.write(requirements_str)
dream_function()

Generates a dream based on the conversation and emotion parameters of the user.

Returns:

Name Type Description
str

A dream generated by OpenAI's LLMs.

Examples:

>>> dream_function()
"Ava was exploring a futuristic city filled with skyscrapers, holographic billboards, and flying cars. She was thrilled to see that the city was powered by quantum computing, and robots were everywhere."
Source code in backend/Multi-Sensory Virtual AAGI/functions/dream.py
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
def dream_function():
    """
    Generates a dream based on the conversation and emotion parameters of the user.

    Reads the current conversation, personality, thought bubble and the six
    emotion values from STATE_DIR, concatenates them into one context string,
    and asks the chat model (with a module-level few-shot example) to produce
    a dream narrative.

    Args:
      None
    Returns:
      str: A dream generated by OpenAI's LLMs.
    Examples:
      >>> dream_function()
      "Ava was exploring a futuristic city filled with skyscrapers, holographic billboards, and flying cars. She was thrilled to see that the city was powered by quantum computing, and robots were everywhere."
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # create conversation string, each dialogue separated by new line
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as f:
        data = json.load(f)

    conversation_str = '\nConversation: '
    for message in data['conversation']:
        conversation_str += message['sender'] + ': ' + message['message']
        if message['file_upload'] != 'none':
            conversation_str += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        conversation_str += '\n'

    # use context managers so the file handles are actually closed
    # (the previous open(...).read() left them to the garbage collector)
    with open(os.path.join(STATE_DIR, "personality.txt")) as f:
        personality = "Personality:\n" + f.read()

    with open(os.path.join(STATE_DIR, "thought_bubble.txt")) as f:
        thought_bubble = "\nAlex's thought bubble\n" + f.read()

    # read the six emotion values; each lives in STATE_DIR/<emotion>.txt
    emotions = {}
    for name in ("curiosity", "creativity", "fear", "happiness", "sadness", "anger"):
        with open(os.path.join(STATE_DIR, name + ".txt"), "r") as f:
            emotions[name] = f.read()

    values_string = "\nAlex Emotion Parameters:\nHappiness: " + emotions["happiness"] + "\nSadness: " + emotions["sadness"] + "\nCreativity: " + emotions["creativity"] + "\nCuriosity: " + emotions["curiosity"] + "\nAnger: " + emotions["anger"] + "\nFear: " + emotions["fear"]

    info = personality + conversation_str + thought_bubble + values_string

    # NOTE(review): human_template / assistant_template are presumably a
    # module-level few-shot example pair — not defined in this function.
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
    assistant_message_prompt = AIMessagePromptTemplate.from_template(assistant_template)

    human_message_prompt1 = HumanMessagePromptTemplate.from_template(info + "\nIMPORTANT: ALEX IS DREAMING NOW\nDream:\n")

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt, assistant_message_prompt,human_message_prompt1])

    response = chat(chat_prompt.format_prompt().to_messages()).content
    return response

start_task_message_function(well_defined_task)

Generates a response to a well-defined task.

Parameters:

Name Type Description Default
well_defined_task str

The task to respond to.

required

Returns:

Name Type Description
str

The response to the task.

Side Effects

Loads environment variables from the .env file.

Examples:

>>> start_task_message_function("Create a new user")
"I have received your task to create a new user. My name is Alex. I will proceed to execute the task accordingly."
Source code in backend/Multi-Sensory Virtual AAGI/functions/start_task_message.py
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
def start_task_message_function(well_defined_task):
    """
    Produce Alex's short acknowledgement message for a newly received task.

    Args:
      well_defined_task (str): The task to respond to.
    Returns:
      str: The acknowledgement text generated by the model.
    Side Effects:
      Loads environment variables from the .env file.
    Examples:
      >>> start_task_message_function("Create a new user")
      "I have received your task to create a new user. My name is Alex. I will proceed to execute the task accordingly."
    """
    llm = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # seed the model with an assistant-voice instruction containing the task
    ack_template = "{well_defined_task}\nI need to tell the user I have received their task, and will proceed to execute it accordingly. My name is Alex. I should mention few key details about the task. But it should not be long. I SHOULD NOT ASK THE USER ANY QUESTION."
    prompt = ChatPromptTemplate.from_messages(
        [AIMessagePromptTemplate.from_template(ack_template)]
    )

    messages = prompt.format_prompt(well_defined_task=well_defined_task).to_messages()
    return llm(messages).content

determine_task_talk_function()

Determines whether a given conversation is a task or talk.

Returns:

Name Type Description
str

The response from the chatbot.

Side Effects

Loads environment variables from the .env file. Loads the abilities.json file. Loads the conversation.json file.

Examples:

>>> determine_task_talk_function()
'Talk'
Source code in backend/Multi-Sensory Virtual AAGI/functions/determine_task_talk.py
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
def determine_task_talk_function():
    """
    Determines whether the current conversation is asking for a task or just talk.

    Builds a few-shot prompt (three labelled example conversations plus the
    real one) and returns the model's single-word classification.

    Args:
      None
    Returns:
      str: The response from the chatbot (expected to be 'Task' or 'Talk').
    Side Effects:
      Loads environment variables from the .env file.
      Loads the abilities.json file.
      Loads the conversation.json file.
    Examples:
      >>> determine_task_talk_function()
      'Talk'
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # Load the ability JSON file
    with open(os.path.join(STATE_DIR,'abilities.json'), 'r') as f:
        abilities_data = json.load(f)
    # Extract the names of all abilities and format them into a string separated by commas
    ability_names = "\nTools the assistant can access are " + ', '.join([ability['name'] for ability in abilities_data['abilities']])

    # create conversation string, each dialogue separated by new line
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as f:
        data = json.load(f)

    conversation_str = ''
    for message in data['conversation']:
        conversation_str += message['sender'] + ': ' + message['message']
        if message['file_upload'] != 'none':
            conversation_str += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        conversation_str += '\n'

    intro = "You are Alex an AI assistant that uses a very Large Language Model."

    # NOTE(review): `instructions` and human_template1..4 are presumably
    # module-level few-shot prompt templates — not defined in this function;
    # confirm they exist at module scope.
    Human_template = intro + ability_names + instructions 
    Human_message_prompt = HumanMessagePromptTemplate.from_template(Human_template)

    human_message_prompt1 = HumanMessagePromptTemplate.from_template(human_template1)

    # example answers that label each example conversation
    assistant_template1 = "Talk"
    assistant_message_prompt1 = AIMessagePromptTemplate.from_template(assistant_template1)

    human_message_prompt2 = HumanMessagePromptTemplate.from_template(human_template2)

    assistant_template2 = "Task"
    assistant_message_prompt2 = AIMessagePromptTemplate.from_template(assistant_template2)

    human_message_prompt3 = HumanMessagePromptTemplate.from_template(human_template3)

    assistant_template3 = "Talk"
    assistant_message_prompt3 = AIMessagePromptTemplate.from_template(assistant_template3)

    # the final human message carries the real conversation to classify
    human_message_prompt4 = HumanMessagePromptTemplate.from_template(human_template4)

    chat_prompt = ChatPromptTemplate.from_messages([Human_message_prompt, human_message_prompt2, assistant_message_prompt2, human_message_prompt1, assistant_message_prompt1, human_message_prompt3, assistant_message_prompt3, human_message_prompt4])
    response = chat(chat_prompt.format_prompt(conversation_str=conversation_str).to_messages()).content
    return response

update_emotion_function(emotion)

Updates the emotion value in the state_of_mind directory.

Parameters:

Name Type Description Default
emotion str

The emotion to update.

required

Returns:

Type Description

None

Side Effects

Writes the updated emotion value to the state_of_mind directory.

Examples:

>>> update_emotion_function('happiness')
None
Source code in backend/Multi-Sensory Virtual AAGI/functions/update_emotions.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
def update_emotion_function(emotion):
    """
    Applies a small random drift to one emotion value in the state_of_mind directory.

    Reads state_of_mind/<emotion>.txt, shifts the value by the difference of
    two uniform(0.1, 0.3) draws (a net drift in roughly [-0.2, +0.2]), clamps
    the result to the valid emotion range [0, 1], and writes it back.

    Args:
      emotion (str): The emotion to update (file name stem, e.g. 'happiness').
    Returns:
      None
    Side Effects:
      Overwrites state_of_mind/<emotion>.txt with the new value.
    Examples:
      >>> update_emotion_function('happiness')
      None
    """
    with open(f"state_of_mind/{emotion}.txt", "r") as f:
        base_num = float(f.read())

    # random walk: one draw up, one draw down, so the expected net drift is 0
    drift = random.uniform(0.1, 0.3) - random.uniform(0.1, 0.3)
    # clamp: emotion values are defined on [0, 1] throughout this module;
    # the raw walk could otherwise push the value outside that range
    response = str(round(min(1.0, max(0.0, base_num + drift)), 2))

    with open(f"state_of_mind/{emotion}.txt", "w") as file:
        file.write(response)

update_emotions_function()

Updates all emotion values in the state_of_mind directory.

Returns:

Type Description

None

Side Effects

Writes the updated emotion values to the state_of_mind directory.

Examples:

>>> update_emotions_function()
None
Source code in backend/Multi-Sensory Virtual AAGI/functions/update_emotions.py
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
def update_emotions_function():
    """
    Refreshes every emotion value in the state_of_mind directory.

    Args:
      None
    Returns:
      None
    Side Effects:
      Writes the updated emotion values to the state_of_mind directory.
    Examples:
      >>> update_emotions_function()
      None
    """
    # delegate to the single-emotion updater for each tracked emotion
    for emotion in ('happiness', 'sadness', 'anger', 'fear', 'creativity', 'curiosity'):
        update_emotion_function(emotion)

summarize(text)

Summarizes a text using OpenAI's GPT-3.5-Turbo model.

Parameters:

Name Type Description Default
text str

The text to summarize.

required

Returns:

Name Type Description
str

The summarized text.

Examples:

>>> summarize("This is a long text.")
"This is a short summary of the text."
Source code in backend/Multi-Sensory Virtual AAGI/functions/update_conversation.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
def summarize(text):
    """
    Summarizes a text using OpenAI's GPT-3.5-Turbo model.

    Args:
      text (str): The text to summarize.
    Returns:
      str: The summarized text.
    Examples:
      >>> summarize("This is a long text.")
      "This is a short summary of the text."
    """
    # consistency fix: pass the API key explicitly, as every other
    # ChatOpenAI construction in this module does
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)
    human_template1 = "Your Job is to convert this text to clear, concise, readable summaries without missing any important details that could be important for someone to know to answer a question. Summarize this text without losing any important details. \n {text} ."
    human_message_prompt1 = HumanMessagePromptTemplate.from_template(human_template1)
    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt1])
    response = chat(chat_prompt.format_prompt(text = text).to_messages())
    return response.content

tiktoken_len(text)

Calculates the length of a text in tokens.

Parameters:

Name Type Description Default
text str

The text to calculate the length of.

required

Returns:

Name Type Description
int

The length of the text in tokens.

Examples:

>>> tiktoken_len("Hello world")
2
Source code in backend/Multi-Sensory Virtual AAGI/functions/update_conversation.py
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
def tiktoken_len(text):
    """
    Calculates the length of a text in tokens.

    Args:
      text (str): The text to calculate the length of.
    Returns:
      int: The length of the text in tokens.
    Examples:
      >>> tiktoken_len("Hello world")
      2
    """
    # cl100k_base is the encoding used by the gpt-3.5-turbo family
    encoding = tiktoken.get_encoding('cl100k_base')
    return len(encoding.encode(text, disallowed_special={}))

update_conversation_function(user_response, sender, file_path, file_description)

Updates the conversation with a new message and summarizes the conversation if it is too long.

Parameters:

Name Type Description Default
user_response str

The user's response.

required
sender str

The sender of the message.

required
file_path str

The path of the file uploaded by the user.

required
file_description str

The description of the file uploaded by the user.

required

Returns:

Name Type Description
str

An empty string.

Side Effects

Writes the updated conversation to the conversation.json file.

Examples:

>>> update_conversation_function("Hello!", "User", "", "")
""
Source code in backend/Multi-Sensory Virtual AAGI/functions/update_conversation.py
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
def update_conversation_function(user_response, sender, file_path, file_description):
    """
    Updates the conversation with a new message and summarizes the conversation if it is too long.

    When the running conversation exceeds ~600 tokens it is chunked,
    summarized, stored in the Chroma vector memory, and the on-disk
    conversation is collapsed to a retrieved-summary message plus the new
    message.

    Args:
      user_response (str): The user's response.
      sender (str): The sender of the message.
      file_path (str): The path of the file uploaded by the user ("" if none).
      file_description (str): The description of the file uploaded by the user.
    Returns:
      str: An empty string.
    Side Effects:
      Writes the updated conversation to the conversation.json file; may
      create/extend the Chroma store in the 'memory' directory and update
      num_memories.txt.
    Examples:
      >>> update_conversation_function("Hello!", "User", "", "")
      ""
    """
    with open(os.path.join(STATE_DIR, "num_memories.txt"), "r") as f:
        num_memories = int(f.read().strip())

    # create conversation string, each dialogue separated by new line;
    # the leading 'Summary' message is excluded from the token count
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as f:
        data = json.load(f)

    conversation_str = ''
    for message in data['conversation']:
        if message['sender'] == 'Summary':
            continue
        conversation_str += message['sender'] + ': ' + message['message']
        if message['file_upload'] != 'none':
            conversation_str += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        conversation_str += '\n'

    file_upload = 'none'
    if file_path != "":
        file_upload = "File has been uploaded by user, File path is\n" + file_path + ".\nFile Description is\n" + file_description

    persist_directory = 'memory'
    embeddings = OpenAIEmbeddings()

    print(tiktoken_len(conversation_str))
    # fast path: no long-term memory yet and the conversation is still short
    if num_memories == 0:
        if tiktoken_len(conversation_str) < 600:
            print("Inside num memories 0 and less than 600  tokens")
            # Add a new message to the conversation
            new_message = {"sender": sender, "message": user_response, "file_upload": file_upload}
            data["conversation"].append(new_message)
            # Write the updated JSON data back to the file
            with open(os.path.join(STATE_DIR,'conversation.json'), 'w') as f:
                json.dump(data, f)

            return ""


    if tiktoken_len(conversation_str) > 600:
        print("inside of more than 600 tokens")
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size = 200,
            chunk_overlap  = 0,
            length_function = tiktoken_len,
        )
        texts = text_splitter.split_text(conversation_str)

        print("Summarising texts")
        summarized_texts = [summarize(text) for text in texts]

        if num_memories == 0:
            print("Creating first database")
            vectordb = Chroma.from_texts(summarized_texts, embeddings, persist_directory=persist_directory)
            vectordb.persist()
            num_memories += 1
            # fix: persist the updated counter (was a hard-coded 1) and drop
            # the redundant close() — the with-block already closes the file
            with open(os.path.join(STATE_DIR, "num_memories.txt"), 'w') as f:
                f.write(str(num_memories))
        else:
            vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
            print("adding summarised texts")
            vectordb.add_texts(summarized_texts)

    vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)

    # retrieve the 3 memories most relevant to the current context
    search_string = conversation_str + sender + user_response
    docs = vectordb.similarity_search(search_string, k = 3) 
    summarized_text = ""
    for doc in docs:
        summarized_text = summarized_text + "\n" + doc.page_content

    # slot the retrieved summary into the leading 'Summary' message
    data['conversation'][0]['message'] = summarized_text

    if tiktoken_len(conversation_str) > 600:
        print("inside bottom check")
        # conversation was flushed to memory: keep only summary + new message
        summary = data['conversation'][0]
        data['conversation'] = [summary, {
        'sender': sender,
        'message': user_response,
        'file_upload': file_upload
        }]
    else:
        new_message = {"sender": sender, "message": user_response, "file_upload": file_upload}
        data["conversation"].append(new_message)   

    # consistency fix: use STATE_DIR (was a hard-coded 'state_of_mind')
    with open(os.path.join(STATE_DIR, 'conversation.json'), 'w') as f:
        json.dump(data, f)

    # fix: the docstring promises str — return "" on this path too (was None)
    return ""

update_thought_bubble_function()

Updates the thought bubble with a modified version based on a conversation.

Returns:

Type Description

None

Side Effects

Writes the modified thought bubble to the file state_of_mind/thought_bubble.txt

Examples:

>>> update_thought_bubble_function()
None
Source code in backend/Multi-Sensory Virtual AAGI/functions/update_thought_bubble.py
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
def update_thought_bubble_function():
    """
    Updates the thought bubble with a modified version based on the conversation.

    Builds a one-shot prompt (example thought bubble + example conversation +
    example modification) and asks the model to modify the current thought
    bubble in light of the current conversation, then writes the result back.

    Args:
      None
    Returns:
      None
    Side Effects:
      Writes the modified thought bubble to the file state_of_mind/thought_bubble.txt
    Examples:
      >>> update_thought_bubble_function()
      None
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)
    # create conversation string, each dialogue separated by new line
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as f:
        data = json.load(f)

    conversation_str = ''
    for message in data['conversation']:
        conversation_str += message['sender'] + ': ' + message['message']
        if message['file_upload'] != 'none':
            conversation_str += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        conversation_str += '\n'

    with open(os.path.join(STATE_DIR, "thought_bubble.txt"), "r") as f:
        thought_bubble = f.read()

    # one-shot example: bubble before, conversation, bubble after
    thought_bubble_template = "[['Music', 'pop', 'rock', 'jazz'], ['Travel', 'beach', 'mountains', 'adventure'], ['Food', 'sushi', 'pasta', 'vegan']]"
    thought_bubble_modified_template = "[['TV Shows', 'Game of Thrones', 'Friends', 'The Office', 'Breaking Bad', 'Stranger Things'], ['Travel', 'beach', 'mountains', 'adventure'], ['Food', 'sushi', 'pasta', 'vegan']]"
    conversation_str_template = """human: Hi there!
assistant: Hello! How can I assist you today?
human: Can you tell me about some popular TV shows?
assistant: Of course! Some popular TV shows include Game of Thrones, Friends, The Office, Breaking Bad, and Stranger Things. Game of Thrones is known for its epic battles and fantasy world with dragons, while Friends is a classic sitcom about a group of friends in New York City. The Office is a mockumentary-style show about a group of employees working at a paper company, and Breaking Bad is a thrilling drama about a chemistry teacher who becomes a drug kingpin. Stranger Things is a sci-fi/horror series set in the 1980s that follows a group of kids as they uncover supernatural mysteries."""

    human_template = "Current thought bubble:\n{thought_bubble_template}\nConversation:\n{conversation_str_template}\nGive modified thought bubble:\n"
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

    assistant_template = "{thought_bubble_modified_template}"
    assistant_message_prompt = AIMessagePromptTemplate.from_template(assistant_template)

    human_template1 = "Current thought bubble:\n{thought_bubble}\nConversation:\n{conversation_str}\nGive modified thought bubble:\n"
    human_message_prompt1 = HumanMessagePromptTemplate.from_template(human_template1)

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt, assistant_message_prompt, human_message_prompt1])
    response = chat(chat_prompt.format_prompt(thought_bubble=thought_bubble, thought_bubble_template = thought_bubble_template, thought_bubble_modified_template=thought_bubble_modified_template, conversation_str = conversation_str, conversation_str_template =conversation_str_template).to_messages()).content

    # consistency fix: write via STATE_DIR like the read above
    # (was a placeholder-free f-string with a hard-coded 'state_of_mind' path)
    with open(os.path.join(STATE_DIR, "thought_bubble.txt"), "w") as file:
        # Write the text to the file
        file.write(response)

file_describe_function(file_path)

Generates a description of a file based on its type.

Parameters:

Name Type Description Default
file_path str

The path of the file to be described.

required

Returns:

Name Type Description
str

A description of the file.

Examples:

>>> file_describe_function('example.jpg')
'Image Uploaded. Description of the image.'
Source code in backend/Multi-Sensory Virtual AAGI/functions/file_describe.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
def file_describe_function(file_path):
    """
    Generates a description of a file based on its type.

    The type is detected from the file extension (case-insensitive):
      * image  -> caption produced by the ``image_to_text`` model
      * video  -> Whisper transcript of the audio track plus a caption
                  for one frame per second of footage
      * audio  -> Whisper transcript
      * other  -> empty string

    Args:
      file_path (str): The path of the file to be described.
    Returns:
      str: A description of the file, or "" for unknown file types.
    Examples:
      >>> file_describe_function('example.jpg')
      'Image Uploaded. Description of the image.'
    """
    # Lower-case the extension so '.JPG' and '.jpg' are treated alike.
    file_extension = os.path.splitext(file_path)[1].lower()
    if file_extension in ('.jpg', '.jpeg', '.png', '.gif', '.bmp'):
        print('Image Uploaded')
        file_description = image_to_text(file_path)[0]['generated_text']
        return file_description
    elif file_extension in ('.mp4', '.avi', '.wmv', '.mov', '.flv'):
        print('Video Uploaded')
        file_path_without_extension = os.path.splitext(file_path)[0]
        video = VideoFileClip(file_path)
        audio = video.audio
        audio_path = file_path_without_extension + "audio_extracted.mp3"
        try:
            audio.write_audiofile(audio_path)
            transcript = "Audio Transcript of the video is:\n"
            print("waiting for audio transcription")
            # Context manager closes the handle even if transcription fails
            # (the original leaked the file object).
            with open(audio_path, "rb") as audio_file:
                transcript = transcript + openai.Audio.transcribe("whisper-1", audio_file).text
        except Exception as e:
            # Videos without an audio track (audio is None) end up here.
            transcript = "No audio"

        # Extract one frame per second from the video and process it
        transcript = transcript + "\nDescription of one frame of each second in the video is:\n"
        frames_per_second = 1
        # video.fps is typically a float (e.g. 29.97); take an int step and
        # guard against fps < 1 producing a zero divisor.
        step = max(1, int(video.fps // frames_per_second))
        count = 0
        for i, frame in enumerate(video.iter_frames()):
            if i % step == 0:
                print(count)
                count = count + 1
                frame_path = f"{file_path_without_extension}_frame{i}.png"
                imageio.imwrite(frame_path, frame)
                file_description = image_to_text(frame_path)[0]['generated_text']
                transcript += file_description + "\n"
        # Release the clip's underlying reader resources.
        video.close()
        return transcript
    elif file_extension in ('.mp3', '.wav', '.wma', '.aac'):
        print('Audio Uploaded')
        print("waiting for audio transcription")
        # Close the audio handle after the API call (was leaked before).
        with open(file_path, "rb") as audio_file:
            transcript = openai.Audio.transcribe("whisper-1", audio_file).text
        return transcript
    else:
        print('Unknown file type')
        return ""

update_task_list_function(well_defined_task, id)

Updates a task list with a new task.

Parameters:

Name Type Description Default
well_defined_task str

The task to add to the list.

required
id int

The ID of the task.

required
Side Effects

Writes the updated task list to a JSON file.

Returns:

Type Description

None

Examples:

>>> update_task_list_function("Clean the kitchen", 1)
None
Source code in backend/Multi-Sensory Virtual AAGI/functions/update_task_list.py
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
def update_task_list_function(well_defined_task, id):
    """
    Appends a newly created task to the persistent task list.

    Args:
      well_defined_task (str): The task to add to the list.
      id (int): The ID of the task.
    Side Effects:
      Writes the updated task list to a JSON file in STATE_DIR.
    Returns:
      None
    Examples:
      >>> update_task_list_function("Clean the kitchen", 1)
      None
    """
    task_list_path = os.path.join(STATE_DIR, "task_list.json")

    # Read the current task list from disk.
    with open(task_list_path, 'r') as f:
        data = json.load(f)

    # Record the new task together with its creation timestamp.
    data['tasks'].append({
        "id": id,
        "task": well_defined_task,
        "task_created_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
    })

    # Persist the modified list back to the same file.
    with open(task_list_path, 'w') as f:
        json.dump(data, f, indent=2)

talk_function(id)

Talks to an AI assistant.

Parameters:

Name Type Description Default
id str

The ID of the conversation.

required

Returns:

Name Type Description
str

The response of the AI assistant.

Side Effects

Creates a python script, requirements, and output files in the tempfiles directory.

Examples:

>>> talk_function('2020_08_20_12_30_00')
"Hello, how can I help you?"
Source code in backend/Multi-Sensory Virtual AAGI/functions/talk.py
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
def _read_state(filename):
    """Read one state_of_mind file from STATE_DIR, closing the handle."""
    with open(os.path.join(STATE_DIR, filename), "r") as f:
        return f.read()


def talk_function(id):
    """
    Talks to an AI assistant.
    Args:
      id (str): The ID of the conversation, formatted '%Y_%m_%d_%H_%M_%S'.
    Returns:
      str: The response of the AI assistant.
    Side Effects:
      Creates a python script, requirements, and output files in the
      tempfiles directory.
    Examples:
      >>> talk_function('2020_08_20_12_30_00')
      "Hello, how can I help you?"
    """
    script_flag = 0
    python_script = create_python_script_function(id)
    print("Python Script is\n" + python_script)

    # 'none' means the model decided no helper script is needed this turn.
    if python_script == 'none':
        script_flag = 1
        print('script_flag set to 1')

    if script_flag == 0:
        requirements = create_requirements_function(python_script)
        print("Requirements is\n" + requirements)
        if requirements == "empty":
            requirements = ""

        # write python file
        with open(f"tempfiles/python_script{id}.py", "w") as file:
            file.write(python_script)

        # write requirements file
        with open(f"tempfiles/requirements{id}.txt", "w") as file:
            file.write(requirements)

    # Run the generated script, retrying after each failure;
    # handle_error_function rewrites the script file between attempts.
    # NOTE(review): there is no retry cap, so a script the model cannot fix
    # loops forever -- consider bounding the number of attempts.
    while script_flag == 0:
        # Run multiple commands in succession
        commands = f'conda activate aagi && pip install -r tempfiles/requirements{id}.txt && python tempfiles/python_script{id}.py'
        result = subprocess.run(commands, capture_output=True, shell=True, universal_newlines=True)
        print("Commands finished running")
        if result.returncode == 0:
            print("Shell output:", result.stdout)
            break
        else:
            print("Error executing the command:", result.stderr)
            handle_error_function(python_script, requirements, result.stderr, id)

    information = ""
    if script_flag == 0:
        # The generated script writes its findings to this output file.
        with open(f'tempfiles/output{id}.txt', 'r') as file:
            information = file.read()
            print("Extra Information is\n" + information)

    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # State files are read via _read_state so handles are not leaked
    # (the original used bare open(...).read()).
    personality = _read_state("personality.txt")

    thought_bubble = "Alex's thought bubble\n" + _read_state("thought_bubble.txt")

    instructions = """I am Alex an AI assistant, I am influenced heavily by my emotion parameters, which range from 0 to 1. These parameters dictate everything from your tone to the way in which I reply. For instance, when Happiness is high, I will reply with infectious enthusiasm. Conversely, if Fear is high, then I will talk in a paranoid, cautious demeanor. The same holds for other emotions as well and as well as for when certain parameters become low. Here are the current Emotion Parameters:"""

    # Emotion and sensory parameters, one value stored per file.
    curiosity = _read_state("curiosity.txt")
    creativity = _read_state("creativity.txt")
    fear = _read_state("fear.txt")
    happiness = _read_state("happiness.txt")
    sadness = _read_state("sadness.txt")
    anger = _read_state("anger.txt")
    smell = _read_state("smell.txt")
    taste = _read_state("taste.txt")
    touch = _read_state("touch.txt")

    values_string = "\nMy Current Emotion Parameters:\nHappiness: " + happiness + "\nSadness: " + sadness + "\nCreativity: " + creativity + "\nCuriosity: " + curiosity + "\nAnger: " + anger + "\nFear: " + fear + "\n\nCurrent Sensory Parameters: " + "\nSmell: " + smell + "\nTaste: " + taste + "\nTouch: " + touch

    message_list = []

    # The conversation id doubles as a timestamp.
    dt_obj = datetime.datetime.strptime(id, '%Y_%m_%d_%H_%M_%S')
    # convert datetime object to desired format
    formatted_dt = dt_obj.strftime('%Y-%m-%d %H:%M:%S')
    assistant_template = personality + thought_bubble + instructions + values_string
    assistant_message_prompt = AIMessagePromptTemplate.from_template(assistant_template)

    message_list.append(assistant_message_prompt)

    with open(os.path.join(STATE_DIR, 'conversation.json'), 'r') as f:
        data = json.load(f)

    # Replay the stored conversation as alternating human/AI messages.
    count = 0
    for message in data['conversation']:
        if count == 0:
            # Only the very first message keeps its "sender:" prefix.
            text = message['sender'] + ":\n" + message['message']
            human_message_prompt = HumanMessagePromptTemplate.from_template(text)
            message_list.append(human_message_prompt)
            count += 1
            continue

        text = message['message']
        if message['file_upload'] != 'none':
            text += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        if message['sender'] == 'human':
            human_message_prompt = HumanMessagePromptTemplate.from_template(text)
            message_list.append(human_message_prompt)
            continue
        assistant_message_prompt1 = AIMessagePromptTemplate.from_template(text)
        message_list.append(assistant_message_prompt1)

    text = "The way you respond to me must be based on your current emotions parameters. Even though you are an AI model you will try to simulate having emotions using these emotion parameters. Do not tell me you are an AI model. Be natural like a human." + "\nAdditional Information:\n" + information + "\nCurrent Time: " + formatted_dt + "\n"
    human_message_prompt = HumanMessagePromptTemplate.from_template(text)
    # Insert the steering message just before the latest conversation turn.
    message_list.insert(-1, human_message_prompt)
    chat_prompt = ChatPromptTemplate.from_messages(message_list)
    print(chat_prompt.format_prompt())
    response = chat(chat_prompt.format_prompt().to_messages()).content
    return response

create_python_script_task_function(id)

Creates a python script for a given task.

Parameters:

Name Type Description Default
id int

The ID of the task.

required

Returns:

Name Type Description
str

The python script for the task.

Examples:

>>> create_python_script_task_function(1)
'import os\nimport sys\n# Get the absolute path to the current directory\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n# Add the path to the root directory\nsys.path.append(os.path.join(current_dir, '..'))\n# Import the search_function from the search module, You need to append _function to the name of the tool\nfrom ability_functions.search import search_function\ndef python_function(text):\n    # search for relevant information on this topic\n    search_response = search_function(text)\n    # create an instructions that tells the natural language function to extract the search response and frame it as question\n    instructions = "Create a question in words that tells to divide the total amount spent by 10.\nYou can find the total amount spent by analyzing this piece of text\n" + search_response\n    # get the question\n    question = natural_language_task_function(instructions)\n    # pass the question to the calculator function to get the answer\n    answer = calculator_function(question)\n    # write the result to tempfiles/output{id}.txt file\n    with open("tempfiles/output{id}.txt", "w") as f:\n        f.write("Search response was " + search_response)\n        f.write("After Computation the answer is " + str(answer))\n#call the function\npython_function("Total Cost USA Latest Semiconductor Bill")'
Source code in backend/Multi-Sensory Virtual AAGI/functions/create_python_script_task.py
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
def create_python_script_task_function(id):
    """
    Creates a python script for a given task.

    Assembles a few-shot prompt from the assistant personality, the tool
    (ability) descriptions, the module-level example templates, and the
    recent conversation, then asks the chat model to emit the script text.

    Args:
      id (int): The ID of the task.
    Returns:
      str: The python script for the task, as returned by the chat model
        (or the literal string the model emits when no script is needed).
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # Context-managed read so the file handle is closed promptly
    # (the original used bare open(...).read()).
    with open(os.path.join(STATE_DIR, "personality.txt")) as f:
        personality = f.read()

    task_details = find_task_by_id_function(id)

    # Load the ability JSON file
    with open(os.path.join(STATE_DIR,'abilities.json'), 'r') as f:
        abilities_data = json.load(f)

    abilities = "Tools: \n" + '\n'.join( [ability['name'] + ": " + ability['description'] + "\n" + ability['directions'] for ability in abilities_data['abilities']])

    # create conversation string, each dialogue separated by new line
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as f:
        data = json.load(f)

    conversation_str = ''
    for message in data['conversation']:
        conversation_str += message['sender'] + ': ' + message['message']
        if message['file_upload'] != 'none':
            conversation_str += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        conversation_str += '\n'

    # (renamed from 'intial_text' -- local variable typo fix only)
    initial_text = personality + "\n" + abilities + "\n" + import_instructions + "\n" + instructions

    human_message_prompt = HumanMessagePromptTemplate.from_template(initial_text)

    # Few-shot example turns; the templates are defined at module level.
    human_message_prompt1 = HumanMessagePromptTemplate.from_template(human_template1)

    assistant_template1 = "none"
    assistant_message_prompt1 = AIMessagePromptTemplate.from_template(assistant_template1)

    human_message_prompt2 = HumanMessagePromptTemplate.from_template(human_template2)

    assistant_message_prompt2 = AIMessagePromptTemplate.from_template(assistant_template2)
    human_message_prompt3 = HumanMessagePromptTemplate.from_template(human_template3)

    assistant_message_prompt3 = AIMessagePromptTemplate.from_template(assistant_template3)
    human_message_prompt4 = HumanMessagePromptTemplate.from_template(human_template4)

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt, human_message_prompt1, assistant_message_prompt1, human_message_prompt2, assistant_message_prompt2, human_message_prompt3, assistant_message_prompt3, human_message_prompt4])
    python_script = chat(chat_prompt.format_prompt(conversation_str=conversation_str, id = id, task_details=task_details).to_messages()).content
    return python_script

mental_simulation_function(id)

Simulates a conversation between Alex and a user.

Parameters:

Name Type Description Default
id int

The id of the task to be simulated.

required

Returns:

Name Type Description
str

The response from the conversation.

Side Effects

Loads environment variables from the .env file. Loads conversation.json, abilities.json, personality.txt, and thought_bubble.txt from the STATE_DIR.

Examples:

>>> mental_simulation_function(1)
"Alex's response to the conversation."
Source code in backend/Multi-Sensory Virtual AAGI/functions/mental_simulation.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
def mental_simulation_function(id):
    """
    Simulates a conversation between Alex and a user.

    Combines the personality, thought bubble, tool list, conversation so
    far and the task details into a single prompt, then asks the chat
    model how Alex would implement the task.

    Args:
      id (int): The id of the task to be simulated.
    Returns:
      str: The response from the conversation.
    Side Effects:
      Reads conversation.json, abilities.json, personality.txt, and
      thought_bubble.txt from the STATE_DIR.
    Examples:
      >>> mental_simulation_function(1)
      "Alex's response to the conversation."
    """
    chat = ChatOpenAI(temperature  = 0, model= 'gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

    # create conversation string, each dialogue separated by new line
    with open(os.path.join(STATE_DIR,'conversation.json'), 'r') as f:
        data = json.load(f)

    # NOTE(review): the first sender is appended directly after the
    # "Conversation:" label with no newline -- possibly a missing '\n';
    # kept as-is to preserve the exact prompt text.
    conversation_str = '\nConversation:'
    for message in data['conversation']:
        conversation_str += message['sender'] + ': ' + message['message']
        if message['file_upload'] != 'none':
            conversation_str += '\nFile Uploaded by ' + message['sender'] + ": " + message['file_upload']
        conversation_str += '\n'

    # Load the ability JSON file
    with open(os.path.join(STATE_DIR,'abilities.json'), 'r') as f:
        abilities_data = json.load(f)

    abilities = "Tools: \n" + '\n'.join( [ability['name'] + ": " + ability['description'] + "\n" + ability['directions'] for ability in abilities_data['abilities']])

    task_details = find_task_by_id_function(id)

    # Context-managed reads so file handles are not leaked
    # (the original used bare open(...).read()).
    with open(os.path.join(STATE_DIR, "personality.txt")) as f:
        personality = "Personality:\n" + f.read()

    with open(os.path.join(STATE_DIR, "thought_bubble.txt")) as f:
        thought_bubble = "\nAlex's thought bubble\n" + f.read()

    instructions = "\nYou are Alex, think about how you would implement this task."

    info = personality + thought_bubble + abilities + conversation_str + "\n" + task_details + instructions

    human_message_prompt = HumanMessagePromptTemplate.from_template(info)

    chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt])

    response = chat(chat_prompt.format_prompt().to_messages()).content
    return response