
Commit 6585c24

debugging, removed unnecessary files
1 parent 6f01ad7 commit 6585c24

File tree

8 files changed: +5 -49 lines changed


.github/workflows/ai-version-deploy.yml

Lines changed: 0 additions & 5 deletions
@@ -37,11 +37,6 @@ jobs:
         python -m pip install --upgrade pip
         python -m pip install flake8 pytest
         python -m pip install -r requirements.txt
-        # python -m nltk.downloader wordnet
-        # python -m nltk.downloader word2vec_sample
-        # python -m nltk.downloader brown
-        # python -m nltk.downloader punkt
-        # python -m nltk.downloader stopwords
 
     - name: Lint with flake8
       run: |
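The deleted workflow steps were already commented out; they invoked NLTK's downloader to fetch corpora during CI. For reference, the same fetch can be written in Python directly; a minimal sketch, assuming only that the nltk package from requirements.txt is installed (the package names are taken from the deleted lines):

import nltk

# Fetch the corpora the commented-out CI steps referred to.
# nltk.download() respects the NLTK_DATA environment variable when set.
for pkg in ["wordnet", "word2vec_sample", "brown", "punkt", "stopwords"]:
    nltk.download(pkg)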

app/Dockerfile

Lines changed: 0 additions & 43 deletions
@@ -6,53 +6,10 @@ FROM rabidsheep55/python-base-eval-layer
 
 WORKDIR /app
 
-# RUN mkdir /usr/share/nltk_data
-# RUN mkdir -p /usr/share/nltk_data/corpora /usr/share/nltk_data/models /usr/share/nltk_data/tokenizers
-
-# ARG NLTK_DATA=/usr/share/nltk_data
-
-# ENV NLTK_DATA=/usr/share/nltk_data
 # Copy and install any packages/modules needed for your evaluation script.
 COPY requirements.txt .
-# COPY brown_length .
-# COPY word_freqs .
-# COPY w2v .
-# RUN yum install -y wget unzip
 RUN pip3 install -r requirements.txt
 
-# # Download NLTK data files
-# RUN wget -O /usr/share/nltk_data/corpora/wordnet.zip https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/wordnet.zip
-# RUN wget -O /usr/share/nltk_data/models/word2vec_sample.zip https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/models/word2vec_sample.zip
-# RUN wget -O /usr/share/nltk_data/corpora/brown.zip https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/brown.zip
-# RUN wget -O /usr/share/nltk_data/corpora/stopwords.zip https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/stopwords.zip
-# RUN wget -O /usr/share/nltk_data/tokenizers/punkt.zip https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/tokenizers/punkt.zip
-# RUN wget -O /usr/share/nltk_data/tokenizers/punkt_tab.zip https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/tokenizers/punkt_tab.zip
-
-# # Unzip the downloaded files into the correct subfolders corresponding to NLTK requirements
-# RUN unzip /usr/share/nltk_data/corpora/wordnet.zip -d /usr/share/nltk_data/corpora/
-# RUN unzip /usr/share/nltk_data/models/word2vec_sample.zip -d /usr/share/nltk_data/models/
-# RUN unzip /usr/share/nltk_data/corpora/brown.zip -d /usr/share/nltk_data/corpora/
-# RUN unzip /usr/share/nltk_data/corpora/stopwords.zip -d /usr/share/nltk_data/corpora/
-# RUN unzip /usr/share/nltk_data/tokenizers/punkt.zip -d /usr/share/nltk_data/tokenizers/
-# RUN unzip /usr/share/nltk_data/tokenizers/punkt_tab.zip -d /usr/share/nltk_data/tokenizers/
-
-# # Clean up zip files to reduce image size
-# RUN rm /usr/share/nltk_data/corpora/*.zip
-# RUN rm /usr/share/nltk_data/models/*.zip
-# RUN rm /usr/share/nltk_data/tokenizers/*.zip
-
-# Warning: these commands sometimes download corrupted zips, so it is better to wget each package from the main site
-# RUN python -m nltk.downloader wordnet
-# RUN python -m nltk.downloader word2vec_sample
-# RUN python -m nltk.downloader brown
-# RUN python -m nltk.downloader stopwords
-# RUN python -m nltk.downloader punkt
-# RUN python -m nltk.downloader punkt_tab
-
-# Copy the evaluation and testing scripts
-# COPY brown_length ./app/
-# COPY word_freqs ./app/
-# COPY w2v ./app/
 COPY evaluation.py ./app/
 COPY evaluation_tests.py ./app/
 
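Every removed Dockerfile line dealt with staging NLTK data under /usr/share/nltk_data, either by wget-ing each zip from the nltk_data mirror and unzipping it into the corpora/models/tokenizers subfolders, or via python -m nltk.downloader (which the in-line warning says sometimes produced corrupted zips). If that data is ever reinstated, a cheap build-time check is nltk.data.find, which searches nltk.data.path and raises LookupError when a resource is missing or unreadable; a minimal sketch, assuming the unzipped layout the removed lines used:

import nltk

# Assumption: data was unpacked under /usr/share/nltk_data, the directory
# the removed ARG/ENV NLTK_DATA lines pointed at.
nltk.data.path.append("/usr/share/nltk_data")

# nltk.data.find raises LookupError if a resource is absent, so a corrupted
# or partial download fails the image build instead of failing at runtime.
print(nltk.data.find("corpora/wordnet"))
print(nltk.data.find("tokenizers/punkt"))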

app/brown_length

-17 Bytes (binary file not shown)

app/evaluation.py

Lines changed: 5 additions & 1 deletion
@@ -79,6 +79,8 @@ def recursive_evaluation(responses, answers, chain, parser):
         eval_result = chain.invoke({"word": res, "target": ans})
         eval_result_content = eval_result.content
         similarity_result = parser.invoke(eval_result_content)
+
+        print("eval_result_content: ", eval_result_content, "; similarity_result: ", similarity_result, "; res: ", res, "; ans: ", ans) #TODO: debugging
 
         if similarity_result == "True":
             matched_word = ans
@@ -105,7 +107,7 @@ def evaluation_function(response, answer, param=None):
     response = parse_input(response)
     answer = parse_input(answer)
 
-
+    print("response: ", response, "; answer: ", answer) #TODO: debugging
 
 
     # Ensure config is provided
@@ -174,6 +176,8 @@ def evaluation_function(response, answer, param=None):
         return {"is_correct": False, "error": "Invalid input: response and answer must be lists of strings."}
 
     is_correct, correct_answers, incorrect_answers = recursive_evaluation(response, answer, chain, parser)
+    print("correct_answers: ", correct_answers, "; incorrect_answers: ", incorrect_answers) #TODO: debugging
+
     #check if student is inputting enough answers
     if len(response) < param.response_num_required:
         is_correct = False
app/w2v

-51.4 MB (binary file not shown)

app/word_freqs

-710 KB (binary file not shown)

brown_length

-17 Bytes (binary file not shown)

word_freqs

-710 KB (binary file not shown)

0 commit comments