Skip to content

Commit 3d5afc2

Browse files
committed
Refactored base layer. Adding further unittests
1 parent 44ba500 commit 3d5afc2

File tree

14 files changed

+707
-677
lines changed

14 files changed

+707
-677
lines changed

Dockerfile

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -24,14 +24,4 @@ COPY handler.py ./app/
2424
COPY tests/*.py ./app/tests/
2525
COPY tools/*.py ./app/tools/
2626

27-
# Keep these in for backwards compatibility
28-
ENV REQUEST_SCHEMA_URL https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/master/request.json
29-
ENV RESPONSE_SCHEMA_URL https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/master/responsev2.json
30-
31-
# New schemas for evaluation function.
32-
ENV EVAL_REQUEST_SCHEMA_URL https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/master/request/eval.json
33-
ENV PREVIEW_REQUEST_SCHEMA_URL https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/master/request/preview.json
34-
35-
ENV EVAL_RESPONSE_SCHEMA_URL https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/master/response/eval.json
36-
ENV HEALTH_RESPONSE_SCHEMA_URL https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/master/response/healthcheck.json
37-
ENV PREVIEW_RESPONSE_SCHEMA_URL https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/master/response/preview.json
27+
ENV SCHEMAS_URL = https://raw.githubusercontent.com/lambda-feedback/request-response-schemas/579-adding-preview-command

handler.py

Lines changed: 44 additions & 254 deletions
Original file line numberDiff line numberDiff line change
@@ -1,280 +1,70 @@
1-
from typing import Any, Dict
2-
31
from evaluation_function_utils.errors import EvaluationException
42

5-
from .evaluation import evaluation_function # type: ignore
6-
from .tools import docs, parse
7-
from .tools import validate as v
8-
from .tools.healthcheck import healthcheck
9-
10-
"""
11-
Update to evaluation function to allow preview command.
12-
Some functions do not currently have a preview.py,
13-
so we define an empty function.
14-
"""
15-
try:
16-
from .preview import preview_function # type: ignore
17-
except ImportError:
18-
19-
def preview_function(response: Any, params: Any) -> Dict:
20-
return {"preview": response}
21-
22-
23-
"""
24-
Command Handler Functions.
25-
"""
26-
27-
28-
def handle_unknown_command(command):
29-
"""
30-
Function to create the response when the command is unknown.
31-
---
32-
This function does not handle any of the request body so it is neither parsed nor
33-
validated against a schema. Instead, a simple message is returned telling the
34-
requestor that the command isn't allowed.
35-
"""
36-
return {"error": {"message": f"Unknown command '{command}'."}}
37-
38-
39-
def handle_healthcheck_command():
40-
"""
41-
Function to create the response when commanded to perform a healthcheck.
42-
---
43-
This function does not handle any of the request body so it is neither parsed nor
44-
validated against a schema.
45-
"""
46-
return {"command": "healthcheck", "result": healthcheck()}
47-
48-
49-
def handle_preview_command(event):
50-
"""
51-
Function to create the response when commanded to preview an answer.
52-
---
53-
This function attempts to parse the request body, performs schema validation and
54-
attempts to run the evaluation function on the given parameters.
55-
56-
If any of these fail, a message is returned and an error field is passed if more
57-
information can be provided.
58-
"""
59-
body, parse_error = parse.parse_body(event)
3+
from .tools import commands, docs, validate
4+
from .tools.parse import ParseError
5+
from .tools.utils import ErrorResponse, HandlerResponse, JsonType, Response
6+
from .tools.validate import ResBodyValidators, ValidationError
607

61-
if parse_error:
62-
return {"error": parse_error}
638

64-
request_error = v.validate_preview_request(body)
65-
66-
if request_error:
67-
return {"error": request_error}
68-
69-
response = body["response"]
70-
params = body.get("params", dict())
71-
72-
try:
73-
result = preview_function(response, params)
74-
# Catch the custom EvaluationException (from evaluation_function_utils) first
75-
except EvaluationException as e:
76-
return {"error": e.error_dict}
77-
78-
except Exception as e:
79-
return {
80-
"error": {
81-
"message": "An exception was raised while executing the preview function.",
82-
"detail": str(e) if str(e) != "" else repr(e),
83-
}
84-
}
85-
86-
return {"command": "preview", "result": result}
87-
88-
89-
def handle_eval_command(event):
90-
"""
91-
Function to create the response when commanded to evaluate an answer.
92-
---
93-
This function attempts to parse the request body, performs schema validation and
94-
attempts to run the evaluation function on the given parameters.
95-
96-
If any of these fail, a message is returned and an error field is passed if more
97-
information can be provided.
98-
"""
99-
body, parse_error = parse.parse_body(event)
100-
101-
if parse_error:
102-
return {"error": parse_error}
103-
104-
request_error = v.validate_eval_request(body)
105-
106-
if request_error:
107-
return {"error": request_error}
108-
109-
response = body["response"]
110-
answer = body["answer"]
111-
params = body.get("params", dict())
112-
113-
try:
114-
result = evaluation_function(response, answer, params)
115-
116-
# Catch the custom EvaluationException (from evaluation_function_utils) first
117-
except EvaluationException as e:
118-
return {"error": e.error_dict}
119-
120-
except Exception as e:
121-
return {
122-
"error": {
123-
"message": "An exception was raised while executing the evaluation function.",
124-
"detail": str(e) if str(e) != "" else repr(e),
125-
}
126-
}
127-
128-
# If a list of "cases" wasn't provided, we don't have any other way to get feedback
129-
cases = params.get("cases", [])
130-
if len(cases) == 0:
131-
return {"command": "eval", "result": result}
132-
133-
# Determine what feedback to provide based on cases
134-
matched_case, warnings = feedback_from_cases(response, params, cases)
135-
if matched_case:
136-
result["feedback"] = matched_case["feedback"]
137-
result["matched_case"] = matched_case["id"]
138-
139-
# Override is_correct provided by the original block by the case 'mark'
140-
if "mark" in matched_case:
141-
result["is_correct"] = bool(int(matched_case["mark"]))
142-
143-
# Add warnings to output if any were encountered
144-
if len(warnings) != 0:
145-
result["warnings"] = warnings
146-
147-
return {"command": "eval", "result": result}
148-
149-
150-
def feedback_from_cases(response, params, cases):
151-
"""
152-
Attempt to find the correct feedback from a list of cases.
153-
Returns a matched 'case' (the full object), and optional list of warnings
154-
"""
155-
156-
# A list of "cases" was provided, try matching to each of them
157-
matches = []
158-
warnings = []
159-
eval_function_feedback = []
160-
for i, case in enumerate(cases):
161-
# Validate the case block has an answer and feedback
162-
if "answer" not in case:
163-
warnings += [{"case": i, "message": "Missing answer field"}]
164-
continue
165-
166-
if "feedback" not in case:
167-
warnings += [{"case": i, "message": "Missing feedback field"}]
168-
continue
169-
170-
# Merge current evaluation params with any specified in case
171-
case_params = case.get("params", {})
172-
173-
# Run the evaluation function based on this case's answer
174-
try:
175-
res = evaluation_function(
176-
response, case.get("answer"), {**params, **case_params}
177-
)
178-
179-
except EvaluationException as e:
180-
warnings += [{"case": i, **e.error_dict}]
181-
continue
182-
183-
except Exception as e:
184-
warnings += [
185-
{
186-
"case": i,
187-
"message": "An exception was raised while executing the evaluation function.",
188-
"detail": str(e) if str(e) != "" else repr(e),
189-
}
190-
]
191-
continue
192-
193-
# Function should always return an 'is_correct' if no errors were raised
194-
if not "is_correct" in res:
195-
warnings += [
196-
{
197-
"case": i,
198-
"message": "is_correct missing from function output",
199-
}
200-
]
201-
continue
202-
203-
# This case matches the response, add its index to the list of matches
204-
if res.get("is_correct"):
205-
matches += [i]
206-
eval_function_feedback += [res.get("feedback", "")]
9+
def handle_command(event: JsonType, command: str) -> HandlerResponse:
10+
if command in ("docs-dev", "docs"):
11+
return docs.send_dev_docs()
20712

208-
if len(matches) == 0:
209-
return None, warnings
13+
elif command == "docs-user":
14+
return docs.send_user_docs()
21015

211-
# Select the matched case
212-
matched_case = cases[matches[0]]
213-
matched_case["id"] = matches[0]
214-
if not matched_case["params"].get("override_eval_feedback", False):
215-
separator = "<br />" if len(eval_function_feedback[0]) > 0 else ""
216-
matched_case.update(
217-
{
218-
"feedback": matched_case.get("feedback", "")
219-
+ separator
220-
+ eval_function_feedback[0]
221-
}
222-
)
16+
elif command in ("eval", "grade"):
17+
response = commands.evaluate(event)
18+
validator = ResBodyValidators.EVALUATION
22319

224-
if len(matches) == 1:
225-
# warnings += [{"case": matches[0]}]
226-
return matched_case, warnings
20+
elif command == "preview":
21+
response = commands.preview(event)
22+
validator = ResBodyValidators.PREVIEW
22723

24+
elif command == "healthcheck":
25+
response = commands.healthcheck()
26+
validator = ResBodyValidators.HEALTHCHECK
22827
else:
229-
s = ", ".join([str(m) for m in matches])
230-
warnings += [
231-
{
232-
"message": f"Cases {s} were matched. Only the first one's feedback was returned"
233-
}
234-
]
235-
return matched_case, warnings
28+
response = Response(
29+
error=ErrorResponse(message=f"Unknown command '{command}'.")
30+
)
31+
validator = ResBodyValidators.GENERIC
23632

33+
validate.body(response, validator)
23734

238-
"""
239-
Main Handler Function
240-
"""
35+
return response
24136

24237

243-
def handler(event, context={}):
38+
def handler(event: JsonType, context: JsonType = {}) -> HandlerResponse:
24439
"""
24540
Main function invoked by AWS Lambda to handle incoming requests.
24641
---
247-
This function invokes the handler function for that particular command and returns
248-
the result. It also performs validation on the response body to make sure it follows
42+
This function invokes the handler function for that particular command
43+
and returns
44+
the result. It also performs validation on the response body to make sure
45+
it follows
24946
the schema set out in the request-response-schema repo.
25047
"""
25148
headers = event.get("headers", dict())
25249
command = headers.get("command", "eval")
25350

254-
if command == "healthcheck":
255-
response = handle_healthcheck_command()
256-
response_error = v.validate_healthcheck_response(response)
257-
258-
# Remove once all funcs update to V2
259-
elif command == "eval" or command == "grade":
260-
response = handle_eval_command(event)
261-
response_error = v.validate_eval_response(response)
262-
263-
elif command == "preview":
264-
response = handle_preview_command(event)
265-
response_error = v.validate_preview_response(response)
51+
try:
52+
return handle_command(event, command)
26653

267-
elif command == "docs-dev" or command == "docs":
268-
return docs.send_dev_docs()
54+
except ParseError as e:
55+
error = ErrorResponse(message=e.message, detail=e.error_thrown)
26956

270-
elif command == "docs-user":
271-
return docs.send_user_docs()
57+
except ValidationError as e:
58+
error = ErrorResponse(message=e.message, detail=e.error_thrown)
27259

273-
else:
274-
response = handle_unknown_command(command)
275-
response_error = v.validate_response(response)
60+
except EvaluationException as e:
61+
error = e.error_dict
27662

277-
if response_error:
278-
return {"error": response_error}
63+
except Exception as e:
64+
error = ErrorResponse(
65+
message="An exception was raised while "
66+
"executing the preview function.",
67+
detail=(str(e) if str(e) != "" else repr(e)),
68+
)
27969

280-
return response
70+
return Response(error=error)

requirements.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
11
jsonschema
22
requests
3-
evaluation-function-utils
3+
evaluation-function-utils
4+
typing_extensions

0 commit comments

Comments
 (0)