Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
102 changes: 90 additions & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,7 @@ Your agent can be based on an LLM hosted anywhere, you have available currently O
└── tests/ # contains all tests for the chat function
├── manual_agent_requests.py # allows testing of the docker container through API requests
├── manual_agent_run.py # allows testing of any LLM agent on a couple of example inputs
├── test_example_inputs.py # pytests for the example input files
├── test_index.py # pytests
└── test_module.py # pytests
```
Expand Down Expand Up @@ -164,7 +165,7 @@ This will start the chat function and expose it on port `8080` and it will be op
```bash
curl --location 'http://localhost:8080/2015-03-31/functions/function/invocations' \
--header 'Content-Type: application/json' \
--data '{"body":"{\"conversationId\": \"12345Test\", \"messages\": [{\"role\": \"USER\", \"content\": \"hi\"}], \"user\": {\"type\": \"LEARNER\"}}"}'
```

#### Call Docker Container
Expand All @@ -183,21 +184,98 @@ http://localhost:8080/2015-03-31/functions/function/invocations
Body (stringified within body for API request):

```JSON
{"body":"{\"conversationId\": \"12345Test\", \"messages\": [{\"role\": \"USER\", \"content\": \"hi\"}], \"user\": {\"type\": \"LEARNER\"}}"}
```

Body with optional fields:
```json
{
"message":"hi",
"params":{
"conversation_id":"12345Test",
"conversation_history":[{"type":"user","content":"hi"}],
"summary":" ",
"conversational_style":" ",
"question_response_details": "",
"include_test_data": true,
"conversationId": "<uuid>",
"messages": [
{ "role": "USER", "content": "<previous user message>" },
{ "role": "ASSISTANT", "content": "<previous assistant reply>" },
{ "role": "USER", "content": "<current message>" }
],
"user": {
"type": "LEARNER",
"preference": {
"conversationalStyle": "<stored style string>"
},
"taskProgress": {
"timeSpentOnQuestion": "30 minutes",
"accessStatus": "a good amount of time spent on this question today.",
"markedDone": "This question is still being worked on.",
"currentPart": {
"position": 0,
"timeSpentOnPart": "10 minutes",
"markedDone": "This part is not marked done.",
"responseAreas": [
{
"responseType": "EXPRESSION",
"totalSubmissions": 3,
"wrongSubmissions": 2,
"latestSubmission": {
"submission": "<student's last answer>",
"feedback": "<feedback text from evaluator>",
"answer": "<reference answer used for evaluation>"
}
}
]
}
}
},
"context": {
"summary": "<compressed conversation history>",
"set": {
"title": "Fundamentals",
"number": 2,
"description": "<set description>"
},
"question": {
"title": "Understanding Polymorphism",
"number": 3,
"guidance": "<teacher guidance>",
"content": "<master question content>",
"estimatedTime": "15-25 minutes",
"parts": [
{
"position": 0,
"content": "<part prompt>",
"answerContent": "<part answer>",
"workedSolutionSections": [
{ "position": 0, "title": "Step 1", "content": "..." }
],
"structuredTutorialSections": [
{ "position": 0, "title": "Hint", "content": "..." }
],
"responseAreas": [
{
"position": 0,
"responseType": "EXPRESSION",
"answer": "<reference answer>",
"preResponseText": "<label shown before input>"
}
]
}
]
}
}
}
```

Response:

```json
{
"output": {
"role": "ASSISTANT",
"content": "<assistant reply text>"
},
"metadata": {
"summary": "<updated conversation summary>",
"conversationalStyle": "<updated style string>",
"processingTimeMs": 1234
}
}
```

Expand Down
44 changes: 14 additions & 30 deletions index.py
Original file line number Diff line number Diff line change
@@ -1,52 +1,36 @@
import json
from pydantic import ValidationError

from lf_toolkit.chat import ChatRequest
from src.module import chat_module
from src.agent.utils.types import JsonType

def handler(event, context):
    """
    AWS Lambda handler: validate the incoming chat request and run the chat module.

    The event may be the already-parsed request dict, or an API-Gateway-style
    event carrying a JSON string under the "body" key — in that case the body
    is parsed first.

    Returns an HTTP-style response dict:
      - 400 if the body is not valid JSON or fails ChatRequest validation
        (pydantic's error report is returned as the body),
      - 500 if chat_module() raises,
      - 200 with the serialized chat result otherwise.
    """
    # Unwrap an API-Gateway-style envelope if present.
    if "body" in event:
        try:
            event = json.loads(event["body"])
        except json.JSONDecodeError:
            return {
                "statusCode": 400,
                "body": "Invalid JSON format in the body. Please check the input.",
            }

    # Schema validation replaces ad-hoc key checks: all required fields
    # (conversationId, messages, user, ...) are enforced by the model.
    try:
        request = ChatRequest.model_validate(event)
    except ValidationError as e:
        return {"statusCode": 400, "body": e.json()}

    try:
        result = chat_module(request)
    except Exception as e:
        # Surface the failure to the caller rather than letting Lambda
        # return an opaque invocation error.
        return {
            "statusCode": 500,
            "body": f"An error occurred within the chat_module(): {str(e)}",
        }

    response = {"statusCode": 200, "body": result.model_dump_json()}
    # Log the response (collapsed to one line) for debugging purposes.
    print("Returning response:", " ".join(json.dumps(response, indent=2).splitlines()))

    return response
19 changes: 8 additions & 11 deletions src/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,25 +173,22 @@ def pretty_response_value(self, event: dict) -> str:
return event["messages"][-1].content

agent = BaseAgent()
def invoke_base_agent(messages: list, summary: str, conversationalStyle: str, question_response_details: str, session_id: str) -> InvokeAgentResponseType:
    """
    Call an agent that has no conversation memory and expects to receive all messages
    (including the current user message); returns the chatbot response with updated metadata.

    If the conversation history is long enough, the agent summarizes the conversation
    and produces a conversational-style analysis as a side effect of the invocation.

    Args:
        messages: Full message history, ending with the current user message.
        summary: Previously stored conversation summary (may be empty).
        conversationalStyle: Previously stored style analysis (may be empty).
        question_response_details: Context about the question/submissions, passed
            to the agent via its config.
        session_id: Thread id used to key the agent invocation.

    Returns:
        Dict with "output" (assistant reply text), "summary" and
        "conversationalStyle" (both refreshed from the agent after the call).
    """
    print(f'in invoke_base_agent(), thread_id = {session_id}')

    config = {"configurable": {"thread_id": session_id, "summary": summary, "conversational_style": conversationalStyle, "question_response_details": question_response_details}}
    response_events = agent.app.invoke({"messages": messages, "summary": summary, "conversational_style": conversationalStyle}, config=config, stream_mode="values")
    # Last event in the stream carries the final AI answer.
    output = agent.pretty_response_value(response_events)

    print(f'in invoke_base_agent(), response generated by chatbot')

    # Metadata is read back AFTER the invocation so the returned summary/style
    # reflect any updates the agent made during this call.
    return {
        "output": output,
        "summary": str(agent.get_summary()),
        "conversationalStyle": agent.get_conversational_style(),
    }
Loading