1 | | -from typing import Any, Dict |
2 | | - |
3 | 1 | from evaluation_function_utils.errors import EvaluationException |
4 | 2 |
5 | | -from .evaluation import evaluation_function # type: ignore |
6 | | -from .tools import docs, parse |
7 | | -from .tools import validate as v |
8 | | -from .tools.healthcheck import healthcheck |
9 | | - |
10 | | -""" |
11 | | -Update to evaluation function to allow preview command. |
12 | | -Some functions do not currently have a preview.py, |
13 | | -so we define an empty function. |
14 | | -""" |
15 | | -try: |
16 | | - from .preview import preview_function # type: ignore |
17 | | -except ImportError: |
18 | | - |
19 | | - def preview_function(response: Any, params: Any) -> Dict: |
20 | | - return {"preview": response} |
21 | | - |
22 | | - |
23 | | -""" |
24 | | - Command Handler Functions. |
25 | | -""" |
26 | | - |
27 | | - |
28 | | -def handle_unknown_command(command): |
29 | | - """ |
30 | | - Function to create the response when the command is unknown. |
31 | | - --- |
32 | | - This function does not handle any of the request body so it is neither parsed or |
33 | | - validated against a schema. Instead, a simple message is returned telling the |
34 | | - requestor that the command isn't allowed. |
35 | | - """ |
36 | | - return {"error": {"message": f"Unknown command '{command}'."}} |
37 | | - |
38 | | - |
39 | | -def handle_healthcheck_command(): |
40 | | - """ |
41 | | - Function to create the response when commanded to perform a healthcheck. |
42 | | - --- |
43 | | - This function does not handle any of the request body so it is neither parsed or |
44 | | - validated against a schema. |
45 | | - """ |
46 | | - return {"command": "healthcheck", "result": healthcheck()} |
47 | | - |
48 | | - |
49 | | -def handle_preview_command(event): |
50 | | - """ |
51 | | - Function to create the response when commanded to preview an answer. |
52 | | - --- |
53 | | - This function attempts to parse the request body, performs schema validation and |
54 | | - attempts to run the evaluation function on the given parameters. |
55 | | - |
56 | | - If any of these fail, a message is returned and an error field is passed if more |
57 | | - information can be provided. |
58 | | - """ |
59 | | - body, parse_error = parse.parse_body(event) |
| 3 | +from .tools import commands, docs, validate |
| 4 | +from .tools.parse import ParseError |
| 5 | +from .tools.utils import ErrorResponse, HandlerResponse, JsonType, Response |
| 6 | +from .tools.validate import ResBodyValidators, ValidationError |
60 | 7 |
61 | | - if parse_error: |
62 | | - return {"error": parse_error} |
63 | 8 |
64 | | - request_error = v.validate_preview_request(body) |
65 | | - |
66 | | - if request_error: |
67 | | - return {"error": request_error} |
68 | | - |
69 | | - response = body["response"] |
70 | | - params = body.get("params", dict()) |
71 | | - |
72 | | - try: |
73 | | - result = preview_function(response, params) |
74 | | - # Catch the custom EvaluationException (from evaluation_function_utils) first |
75 | | - except EvaluationException as e: |
76 | | - return {"error": e.error_dict} |
77 | | - |
78 | | - except Exception as e: |
79 | | - return { |
80 | | - "error": { |
81 | | - "message": "An exception was raised while executing the preview function.", |
82 | | - "detail": str(e) if str(e) != "" else repr(e), |
83 | | - } |
84 | | - } |
85 | | - |
86 | | - return {"command": "preview", "result": result} |
87 | | - |
88 | | - |
89 | | -def handle_eval_command(event): |
90 | | - """ |
91 | | - Function to create the response when commanded to evaluate an answer. |
92 | | - --- |
93 | | - This function attempts to parse the request body, performs schema validation and |
94 | | - attempts to run the evaluation function on the given parameters. |
95 | | - |
96 | | - If any of these fail, a message is returned and an error field is passed if more |
97 | | - information can be provided. |
98 | | - """ |
99 | | - body, parse_error = parse.parse_body(event) |
100 | | - |
101 | | - if parse_error: |
102 | | - return {"error": parse_error} |
103 | | - |
104 | | - request_error = v.validate_eval_request(body) |
105 | | - |
106 | | - if request_error: |
107 | | - return {"error": request_error} |
108 | | - |
109 | | - response = body["response"] |
110 | | - answer = body["answer"] |
111 | | - params = body.get("params", dict()) |
112 | | - |
113 | | - try: |
114 | | - result = evaluation_function(response, answer, params) |
115 | | - |
116 | | - # Catch the custom EvaluationException (from evaluation_function_utils) first |
117 | | - except EvaluationException as e: |
118 | | - return {"error": e.error_dict} |
119 | | - |
120 | | - except Exception as e: |
121 | | - return { |
122 | | - "error": { |
123 | | - "message": "An exception was raised while executing the evaluation function.", |
124 | | - "detail": str(e) if str(e) != "" else repr(e), |
125 | | - } |
126 | | - } |
127 | | - |
128 | | - # If a list of "cases" wasn't provided, we don't have any other way to get feedback |
129 | | - cases = params.get("cases", []) |
130 | | - if len(cases) == 0: |
131 | | - return {"command": "eval", "result": result} |
132 | | - |
133 | | - # Determine what feedback to provide based on cases |
134 | | - matched_case, warnings = feedback_from_cases(response, params, cases) |
135 | | - if matched_case: |
136 | | - result["feedback"] = matched_case["feedback"] |
137 | | - result["matched_case"] = matched_case["id"] |
138 | | - |
139 | | - # Override is_correct provided by the original block by the case 'mark' |
140 | | - if "mark" in matched_case: |
141 | | - result["is_correct"] = bool(int(matched_case["mark"])) |
142 | | - |
143 | | - # Add warnings out output if any were encountered |
144 | | - if len(warnings) != 0: |
145 | | - result["warnings"] = warnings |
146 | | - |
147 | | - return {"command": "eval", "result": result} |
148 | | - |
149 | | - |
150 | | -def feedback_from_cases(response, params, cases): |
151 | | - """ |
152 | | - Attempt to find the correct feedback from a list of cases. |
153 | | - Returns a matched 'case' (the full object), and optional list of warnings |
154 | | - """ |
155 | | - |
156 | | - # A list of "cases" was provided, try matching to each of them |
157 | | - matches = [] |
158 | | - warnings = [] |
159 | | - eval_function_feedback = [] |
160 | | - for i, case in enumerate(cases): |
161 | | - # Validate the case block has an answer and feedback |
162 | | - if "answer" not in case: |
163 | | - warnings += [{"case": i, "message": "Missing answer field"}] |
164 | | - continue |
165 | | - |
166 | | - if "feedback" not in case: |
167 | | - warnings += [{"case": i, "message": "Missing feedback field"}] |
168 | | - continue |
169 | | - |
170 | | - # Merge current evaluation params with any specified in case |
171 | | - case_params = case.get("params", {}) |
172 | | - |
173 | | - # Run the evaluation function based on this case's answer |
174 | | - try: |
175 | | - res = evaluation_function( |
176 | | - response, case.get("answer"), {**params, **case_params} |
177 | | - ) |
178 | | - |
179 | | - except EvaluationException as e: |
180 | | - warnings += [{"case": i, **e.error_dict}] |
181 | | - continue |
182 | | - |
183 | | - except Exception as e: |
184 | | - warnings += [ |
185 | | - { |
186 | | - "case": i, |
187 | | - "message": "An exception was raised while executing the evaluation function.", |
188 | | - "detail": str(e) if str(e) != "" else repr(e), |
189 | | - } |
190 | | - ] |
191 | | - continue |
192 | | - |
193 | | - # Function should always return an 'is_correct' if no errors were raised |
194 | | - if not "is_correct" in res: |
195 | | - warnings += [ |
196 | | - { |
197 | | - "case": i, |
198 | | - "message": "is_correct missing from function output", |
199 | | - } |
200 | | - ] |
201 | | - continue |
202 | | - |
203 | | - # This case matches the response, add it's index to the list of matches |
204 | | - if res.get("is_correct"): |
205 | | - matches += [i] |
206 | | - eval_function_feedback += [res.get("feedback", "")] |
| 9 | +def handle_command(event: JsonType, command: str) -> HandlerResponse: |
| 10 | + if command in ("docs-dev", "docs"): |
| 11 | + return docs.send_dev_docs() |
207 | 12 |
208 | | - if len(matches) == 0: |
209 | | - return None, warnings |
| 13 | + elif command == "docs-user": |
| 14 | + return docs.send_user_docs() |
210 | 15 |
211 | | - # Select the matched case |
212 | | - matched_case = cases[matches[0]] |
213 | | - matched_case["id"] = matches[0] |
214 | | - if not matched_case["params"].get("override_eval_feedback", False): |
215 | | - separator = "<br />" if len(eval_function_feedback[0]) > 0 else "" |
216 | | - matched_case.update( |
217 | | - { |
218 | | - "feedback": matched_case.get("feedback", "") |
219 | | - + separator |
220 | | - + eval_function_feedback[0] |
221 | | - } |
222 | | - ) |
| 16 | + elif command in ("eval", "grade"): |
| 17 | + response = commands.evaluate(event) |
| 18 | + validator = ResBodyValidators.EVALUATION |
223 | 19 |
224 | | - if len(matches) == 1: |
225 | | - # warnings += [{"case": matches[0]}] |
226 | | - return matched_case, warnings |
| 20 | + elif command == "preview": |
| 21 | + response = commands.preview(event) |
| 22 | + validator = ResBodyValidators.PREVIEW |
227 | 23 |
| 24 | + elif command == "healthcheck": |
| 25 | + response = commands.healthcheck() |
| 26 | + validator = ResBodyValidators.HEALTHCHECK |
228 | 27 | else: |
229 | | - s = ", ".join([str(m) for m in matches]) |
230 | | - warnings += [ |
231 | | - { |
232 | | - "message": f"Cases {s} were matched. Only the first one's feedback was returned" |
233 | | - } |
234 | | - ] |
235 | | - return matched_case, warnings |
| 28 | + response = Response( |
| 29 | + error=ErrorResponse(message=f"Unknown command '{command}'.") |
| 30 | + ) |
| 31 | + validator = ResBodyValidators.GENERIC |
236 | 32 |
| 33 | + validate.body(response, validator) |
237 | 34 |
238 | | -""" |
239 | | - Main Handler Function |
240 | | -""" |
| 35 | + return response |
241 | 36 |
242 | 37 |
243 | | -def handler(event, context={}): |
| 38 | +def handler(event: JsonType, context: JsonType = {}) -> HandlerResponse: |
244 | 39 | """ |
245 | 40 | Main function invoked by AWS Lambda to handle incoming requests. |
246 | 41 | --- |
247 | | - This function invokes the handler function for that particular command and returns |
248 | | - the result. It also performs validation on the response body to make sure it follows |
| 42 | +    This function invokes the handler function for that particular command |
| 43 | +    and returns the result. |
| 44 | +    It also performs validation on the response body to make sure |
| 45 | +    it follows |
249 | 46 | the schema set out in the request-response-schema repo. |
250 | 47 | """ |
251 | 48 | headers = event.get("headers", dict()) |
252 | 49 | command = headers.get("command", "eval") |
253 | 50 |
254 | | - if command == "healthcheck": |
255 | | - response = handle_healthcheck_command() |
256 | | - response_error = v.validate_healthcheck_response(response) |
257 | | - |
258 | | - # Remove once all funcs update to V2 |
259 | | - elif command == "eval" or command == "grade": |
260 | | - response = handle_eval_command(event) |
261 | | - response_error = v.validate_eval_response(response) |
262 | | - |
263 | | - elif command == "preview": |
264 | | - response = handle_preview_command(event) |
265 | | - response_error = v.validate_preview_response(response) |
| 51 | + try: |
| 52 | + return handle_command(event, command) |
266 | 53 |
267 | | - elif command == "docs-dev" or command == "docs": |
268 | | - return docs.send_dev_docs() |
| 54 | + except ParseError as e: |
| 55 | + error = ErrorResponse(message=e.message, detail=e.error_thrown) |
269 | 56 |
270 | | - elif command == "docs-user": |
271 | | - return docs.send_user_docs() |
| 57 | + except ValidationError as e: |
| 58 | + error = ErrorResponse(message=e.message, detail=e.error_thrown) |
272 | 59 |
273 | | - else: |
274 | | - response = handle_unknown_command(command) |
275 | | - response_error = v.validate_response(response) |
| 60 | + except EvaluationException as e: |
| 61 | + error = e.error_dict |
276 | 62 |
277 | | - if response_error: |
278 | | - return {"error": response_error} |
| 63 | + except Exception as e: |
| 64 | + error = ErrorResponse( |
| 65 | + message="An exception was raised while " |
| 66 | +            "executing the function.", |
| 67 | + detail=(str(e) if str(e) != "" else repr(e)), |
| 68 | + ) |
279 | 69 |
280 | | - return response |
| 70 | + return Response(error=error) |
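
For reviewers who want to exercise the refactored entry point, here is a minimal usage sketch. It is illustrative only: the `evaluation_function_app` import path is a placeholder for the real package name, and the event layout (a `headers` dict selecting the command plus a JSON-encoded `body`) is an assumption carried over from what the old `parse.parse_body` accepted.

```python
# Hypothetical local smoke test for the new handler() entry point.
# `evaluation_function_app` is a placeholder module path, and the JSON-string
# body format is an assumption based on the previous parse.parse_body usage.
import json

from evaluation_function_app import handler  # adjust to the real package

# The command is read from the headers (defaulting to "eval");
# the body carries the fields that command needs.
event = {
    "headers": {"command": "eval"},
    "body": json.dumps({
        "response": "x + 1",
        "answer": "1 + x",
        "params": {},
    }),
}

result = handler(event)
print(result)
# On success this is whatever Response commands.evaluate() built and
# validate.body() accepted; if anything raises, handler() returns
# Response(error=...) with a message and, where available, a detail.
```

Routing every command through `handle_command` and validating the response body in one place is what lets the top-level `handler` collapse into a single try/except over the four error types.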