Commit d2efd03

feat: implement core orchestration loop

1 parent 9a3854e

5 files changed, +285 -6 lines

crates/agentic-core/src/lib.rs

Lines changed: 1 addition & 0 deletions

```diff
@@ -11,6 +11,7 @@
 //! - `theme`: UI theming system
 
 pub mod models;
+pub mod orchestrator;
 pub mod settings;
 pub mod theme;
 
```
crates/agentic-core/src/models.rs

Lines changed: 44 additions & 1 deletion

```diff
@@ -279,6 +279,49 @@ impl ModelValidator {
     }
 }
 
+#[derive(Serialize)]
+struct LocalGenerationRequest<'a> {
+    model: &'a str,
+    prompt: &'a str,
+    stream: bool,
+}
+
+#[derive(Deserialize)]
+struct LocalGenerationResponse {
+    response: String,
+}
+
+/// Sends a single non-streaming generation request to a local model server
+/// and returns the generated text.
+pub async fn call_local_model(
+    endpoint: &str,
+    model: &str,
+    prompt: &str,
+) -> Result<String, anyhow::Error> {
+    let client = Client::new();
+    // Accept endpoints given with or without a scheme, e.g. "localhost:11434".
+    let url = if endpoint.starts_with("http") {
+        format!("{}/api/generate", endpoint)
+    } else {
+        format!("http://{}/api/generate", endpoint)
+    };
+
+    let payload = LocalGenerationRequest {
+        model,
+        prompt,
+        stream: false,
+    };
+
+    let response = client.post(&url).json(&payload).send().await?;
+
+    if response.status().is_success() {
+        let gen_response: LocalGenerationResponse = response.json().await?;
+        Ok(gen_response.response)
+    } else {
+        Err(anyhow::anyhow!(
+            "Failed to get response from local model. Status: {}",
+            response.status()
+        ))
+    }
+}
+
 impl Default for ModelValidator {
     fn default() -> Self {
         Self::new()
@@ -306,4 +349,4 @@ fn format_relative_time(_iso_time: &str) -> String {
     // For now, just return a simple format
     // TODO: Parse ISO time and return relative time like "4 days ago"
     "recently".to_string()
-}
+}
```
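The request shape here (POST to `/api/generate` with `model`, `prompt`, and `stream: false`, returning a JSON body with a `response` field) matches the Ollama generate API, so a quick manual check could look like the sketch below. The endpoint, the model name `llama3`, and the `tokio`/`anyhow` dependencies are illustrative assumptions, not part of this commit; the crate is assumed to be consumed as `agentic_core`.

```rust
// Sketch only: exercises call_local_model against an assumed
// Ollama-style server on localhost:11434 with a hypothetical model.
use agentic_core::models::call_local_model;

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    // The scheme is optional: call_local_model prepends "http://" if missing.
    let reply =
        call_local_model("localhost:11434", "llama3", "Reply with one word: ready?").await?;
    println!("{}", reply);
    Ok(())
}
```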
crates/agentic-core/src/orchestrator.rs

Lines changed: 89 additions & 0 deletions (new file)

```diff
@@ -0,0 +1,89 @@
+use crate::models::call_local_model;
+use serde::Deserialize;
+
+const ORCHESTRATOR_PROMPT: &str = r#"You are an expert prompt engineer. Your task is to help a user craft the perfect prompt for a powerful AI model.
+The user has provided the following query: "{query}"
+
+Analyze the user's query and generate three distinct proposals for a better prompt.
+Each proposal should be a self-contained, ready-to-use prompt.
+Use the 5W method (Who, What, When, Where, Why) to explore different angles of the user's request.
+Rank the proposals by your internal confidence, from least confident to most confident.
+
+Format your response as a JSON object with a single key "proposals" which is an array of three strings.
+Example:
+{
+  "proposals": [
+    "Proposal 1 (least confident)",
+    "Proposal 2 (moderately confident)",
+    "Proposal 3 (most confident)"
+  ]
+}
+"#;
+
+const REVISE_PROMPT: &str = r#"You are an expert prompt engineer. A user wants to revise a prompt proposal.
+
+Original Proposal: "{proposal}"
+User's Revision: "{revision}"
+
+Your task is to integrate the user's revision into the original proposal to create a new, single, improved prompt.
+The new prompt should be self-contained and ready to use.
+
+Format your response as a JSON object with a single key "proposal" which is a string.
+Example:
+{
+  "proposal": "This is the new, revised prompt."
+}
+"#;
+
+#[derive(Deserialize, Debug)]
+struct ProposalsResponse {
+    proposals: Vec<String>,
+}
+
+#[derive(Deserialize, Debug)]
+struct ReviseResponse {
+    proposal: String,
+}
+
+/// Asks the local model to expand `query` into three ranked prompt proposals.
+pub async fn generate_proposals(
+    query: &str,
+    endpoint: &str,
+    model: &str,
+) -> Result<Vec<String>, anyhow::Error> {
+    let prompt = ORCHESTRATOR_PROMPT.replace("{query}", query);
+    let response_str = call_local_model(endpoint, model, &prompt).await?;
+
+    // Models often wrap the JSON in prose, and serde_json rejects trailing
+    // text, so slice out the outermost object before parsing.
+    match (response_str.find('{'), response_str.rfind('}')) {
+        (Some(start), Some(end)) if end > start => {
+            let json_str = &response_str[start..=end];
+            match serde_json::from_str::<ProposalsResponse>(json_str) {
+                Ok(response) => Ok(response.proposals),
+                Err(e) => Err(anyhow::anyhow!("Failed to parse proposals JSON: {}", e)),
+            }
+        }
+        _ => Err(anyhow::anyhow!("No JSON object found in model response")),
+    }
+}
+
+/// Merges the user's revision into an existing proposal via the local model.
+pub async fn revise_proposal(
+    proposal: &str,
+    revision: &str,
+    endpoint: &str,
+    model: &str,
+) -> Result<String, anyhow::Error> {
+    let prompt = REVISE_PROMPT
+        .replace("{proposal}", proposal)
+        .replace("{revision}", revision);
+    let response_str = call_local_model(endpoint, model, &prompt).await?;
+
+    // Same extraction as above: take the outermost JSON object.
+    match (response_str.find('{'), response_str.rfind('}')) {
+        (Some(start), Some(end)) if end > start => {
+            let json_str = &response_str[start..=end];
+            match serde_json::from_str::<ReviseResponse>(json_str) {
+                Ok(response) => Ok(response.proposal),
+                Err(e) => Err(anyhow::anyhow!("Failed to parse revision JSON: {}", e)),
+            }
+        }
+        _ => Err(anyhow::anyhow!("No JSON object found in model response")),
+    }
+}
```
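Together these two functions form the loop the commit message names: generate ranked proposals, let the user pick or amend one, and fold amendments back in via `revise_proposal`. A minimal sketch of that wiring follows; the endpoint, model name, query, and hard-coded revision are illustrative assumptions, not part of this commit.

```rust
// Sketch of the orchestration loop, assuming an Ollama-style server on
// localhost:11434 and a hypothetical model name.
use agentic_core::orchestrator::{generate_proposals, revise_proposal};

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    let endpoint = "localhost:11434"; // assumed local server
    let model = "llama3"; // hypothetical model name

    // Step 1: expand a rough query into three ranked prompt proposals.
    let proposals = generate_proposals("summarize my meeting notes", endpoint, model).await?;
    for (i, p) in proposals.iter().enumerate() {
        println!("[{}] {}", i + 1, p);
    }

    // Step 2: proposals are ordered least- to most-confident, so take the
    // last one and fold in a user revision.
    let chosen = proposals
        .last()
        .ok_or_else(|| anyhow::anyhow!("model returned no proposals"))?;
    let revised = revise_proposal(chosen, "keep it under 100 words", endpoint, model).await?;
    println!("revised: {}", revised);
    Ok(())
}
```

Both functions share the same brace-scanning JSON extraction; if further orchestration steps are added, that logic is a natural candidate for a shared helper.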
