Skip to content

Commit 8fd060f

Browse files
Merge pull request #109 from HyeryeongJoo/image-template-element-recommedation-poc
image-template-element-recommendation-joohyery-240606
2 parents 1dc3a0e + e04a2db commit 8fd060f

16 files changed

+3525
-0
lines changed

genai/aws-gen-ai-kr/20_applications/05_image_styling_recommendation_with_prompt_engineering/evaluation/eval_utils/langchain_bedrock.py

Lines changed: 134 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111

1212
from langchain_aws import ChatBedrock
1313

14+
1415
class BedrockLangChain:
1516

1617
def __init__(self, bedrock_runtime):
@@ -145,6 +146,139 @@ def print_ww(self, *args, width: int = 100, **kwargs):
145146

146147

147148

149+
class BedrockLangChain_Image:
    """Thin wrapper around LangChain's ChatBedrock for the image-styling
    recommendation/evaluation flows.

    Each ``invoke_*`` method builds a (system, human) chat prompt, pipes it
    through a Bedrock chat model and a string output parser, then either
    returns the full response or streams chunks to stdout.
    """

    def __init__(self, bedrock_runtime):
        # boto3 bedrock-runtime client, reused by every model invocation.
        self.bedrock_runtime = bedrock_runtime

    def _build_chain(self, model_id, model_kwargs, system_prompt, user_prompt, verbose):
        """Build the shared ``prompt | model | StrOutputParser`` chain.

        Extracted because the three invoke_* methods previously duplicated
        this construction verbatim.
        """
        model = ChatBedrock(
            client=self.bedrock_runtime,
            model_id=model_id,
            model_kwargs=model_kwargs,
        )
        messages = [
            ("system", system_prompt),
            ("human", user_prompt),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        if verbose:
            print("messages: \n", messages)
            print("prompt: \n")
            self.print_ww(prompt)
        return prompt | model | StrOutputParser()

    def invoke_rewrite_langchain(self, model_id, model_kwargs, system_prompt,
                                 user_prompt, recommendation_review, verbose):
        """Rewrite ``recommendation_review`` via the model.

        Returns the model's full text response.
        """
        chain = self._build_chain(model_id, model_kwargs, system_prompt, user_prompt, verbose)
        print("## Created Prompt:\n")
        response = chain.invoke(
            {
                "recommendation_review": recommendation_review
            }
        )
        return response

    def invoke_creating_criteria_langchain(self, model_id, model_kwargs, system_prompt,
                                           user_prompt, guide, verbose):
        """Stream criteria-creation output for ``guide`` to stdout.

        Returns None; output is printed chunk by chunk as it streams.
        """
        chain = self._build_chain(model_id, model_kwargs, system_prompt, user_prompt, verbose)
        print("## Created Prompt:\n")
        for chunk in chain.stream(
            {
                "guide": guide
            }
        ):
            print(chunk, end="", flush=True)

    def invoke_evaluating_recommendation_review_langchain(self, model_id, model_kwargs,
                                                          system_prompt, user_prompt,
                                                          human_message, AI_message, verbose):
        """Stream an evaluation comparing a human view and an AI view.

        Returns None; output is printed chunk by chunk as it streams.
        """
        chain = self._build_chain(model_id, model_kwargs, system_prompt, user_prompt, verbose)
        for chunk in chain.stream(
            {
                "human_text": human_message,
                "AI_text": AI_message,
            }
        ):
            print(chunk, end="", flush=True)

    def set_text_langchain_body(self, prompt):
        """Return a Bedrock messages-API request body for a text-only user turn."""
        text_only_body = {
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": prompt,
                        },
                    ],
                }
            ],
        }
        return text_only_body

    def print_ww(self, *args, width: int = 100, **kwargs):
        """Like print(), but wraps output to `width` characters (default 100)"""
        buffer = StringIO()
        # Save the real stdout BEFORE the try so `finally` can never see an
        # unbound name, then temporarily redirect print() into the buffer.
        _stdout = sys.stdout
        try:
            sys.stdout = buffer
            print(*args, **kwargs)
            output = buffer.getvalue()
        finally:
            sys.stdout = _stdout  # always restore, even if print() raises
        for line in output.splitlines():
            print("\n".join(textwrap.wrap(line, width=width)))
148282
# from langchain.callbacks import StreamlitCallbackHandler
149283
# model_id="anthropic.claude-3-sonnet-20240229-v1:0", # Claude 3 Sonnet 모델 선택
150284
# # 텍스트 생성 LLM 가져오기, streaming_callback을 인자로 받아옴

genai/aws-gen-ai-kr/20_applications/05_image_styling_recommendation_with_prompt_engineering/evaluation/eval_utils/prompt.py

Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -116,3 +116,121 @@ def get_fashion_evaluation_user_prompt(self):
116116
'''
117117
return user_prompt
118118

119+
120+
121+
class ImagePrompt():
    """Provides the system/user prompt templates used by the image element
    recommendation and recommendation-review evaluation flows."""

    def __init__(self):
        # Prompts are static templates; no per-instance state is needed.
        # (A redundant duplicate `pass` statement was removed.)
        pass

    def get_rewrite_system_prompt(self):
        '''
        Return the system prompt that rewrites a given sentence while
        preserving its meaning.
        '''

        system_prompt = '''The task is to rewrite a given sentence in a different way while preserving its original meaning.\
Your role is to take a sentence provided by the user and rephrase it using different words or sentence structures, \
without altering the core meaning or message conveyed in the original sentence.

Instructions:
1. Read the sentence carefully and ensure you understand its intended meaning.
2. Identify the key components of the sentence, such as the subject, verb, object, and any modifiers or additional information.
3. Think of alternative ways to express the same idea using different vocabulary, sentence structures, or phrasing.
4. Ensure that your rewritten sentence maintains the same essential meaning as the original, without introducing any new information or altering the original intent.
5. Pay attention to grammar, punctuation, and overall coherence to ensure your rewritten sentence is well-formed and easy to understand.
6. If the original sentence contains idioms, metaphors, or cultural references, try to find equivalent expressions or explanations in your rewritten version.
7. Avoid oversimplifying or overly complicating the sentence; aim for a natural and clear rephrasing that maintains the original tone and complexity.

Remember, the goal is to provide a fresh perspective on the sentence while preserving its core meaning and ensuring clarity and coherence in your rewritten version.
'''

        return system_prompt

    def get_rewrite_user_prompt(self):
        '''
        Return the user prompt that rewrites a given sentence
        (expects a ``{recommendation_review}`` template variable).
        '''

        user_prompt = '''Given <recommendation_review> based on the guide on system prompt
Please write in Korean. Output in JSON format following the <output_example> format, excluding <output_example>

<recommendation_review>{recommendation_review}</recommendation_review>
<output_example>
"original_recommendation_review" :
"rewrite_original_recommendation_review" :
</output_example>
'''

        return user_prompt

    def get_create_criteria_system_prompt(self):
        '''
        Return the system prompt for creating evaluation criteria.
        (Previous docstring was a copy-paste from the rewrite prompt.)
        '''
        system_prompt = '''You are a prompt engineering expert.'''

        return system_prompt

    def get_create_criteria_user_prompt(self):
        '''
        Return the user prompt for creating evaluation criteria
        (expects a ``{guide}`` template variable).
        '''
        user_prompt = '''먼저 당신의 역할과 작업을 XML Tag 없이 기술하세요, \
이후에 아래의 <guide> 에 맟주어서 프롬프트를 영어로 작성해주세요.
<guide>{guide}</guide>'''

        return user_prompt

    def get_image_recommendation_system_prompt(self):
        '''
        Return the system prompt that scores the relevance between a human
        opinion and an AI recommendation on image element choices.
        '''

        system_prompt = '''
You will be provided with two opinions: one from a graphic designer regarding image element choices, and \
another from an AI system offering recommendations on image element choices. \
Your task is to evaluate the relevance and coherence between these two opinions \
by assigning a score from 1 to 5, where 1 indicates low relevance and 5 indicates high relevance.\
You will need to define the criteria for scoring in the <criteria></criteria> section, and \
outline the steps for evaluating the two opinions in the <steps></steps> section.

<criteria>
1 - The two opinions are completely unrelated and contradict each other.
2 - The opinions share some minor similarities, but the overall themes and recommendations are largely different.
3 - The opinions have moderate overlap in their themes and recommendations, but there are still notable differences.
4 - The opinions are mostly aligned, with only minor differences in their specific recommendations or perspectives.
5 - The two opinions are highly coherent, complementary, and provide consistent recommendations or perspectives on clothing choices.
</criteria>

<steps>
1. Read and understand the opinion provided by the fashion expert.
2. Read and understand the opinion provided by the AI system.
3. Identify the main themes, recommendations, and perspectives presented in each opinion.
4. Compare the two opinions and assess the degree of alignment or contradiction between them.
5. Based on the criteria defined above, assign a score from 1 to 5 to reflect the relevance and coherence between the two opinions.
6. Provide a brief explanation justifying the assigned score.
</steps>
'''
        return system_prompt

    def get_image_recommendation_user_prompt(self):
        '''
        Return the user prompt for the relevance evaluation (expects
        ``{human_text}`` and ``{AI_text}`` template variables).
        '''

        user_prompt = '''
Given <human_view> and <AI_view>, based on the guide on system prompt
Write in the form of <evaluation> in korean with JSON format

<human_view>{human_text}</human_view>
<AI_view>{AI_text}</AI_view>

<evaluation>
'human_view':
'AI_view' :
'score': 4,
'reason': 'AI view is similar to human view'
</evaluation>
'''
        return user_prompt

0 commit comments

Comments
 (0)