Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions app/constants.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import { AnthropicIcon } from './src/components/AnthropicIcon'
import { GeminiIcon } from './src/components/GeminiIcon'
import { GlmIcon } from './src/components/GlmIcon'
import { OpenAIIcon } from './src/components/OpenAIIcon'

const normalizeDomain = (value?: string) => {
Expand Down Expand Up @@ -41,6 +42,7 @@ export const MODELS = {
gpt52: { name: 'GPT 5.2', label: 'gpt52', icon: OpenAIIcon },
gpt5Mini: { name: 'GPT 5 Mini', label: 'gpt5Mini', icon: OpenAIIcon },
gemini: { name: 'Gemini', label: 'gemini', icon: GeminiIcon },
glm4Plus: { name: 'GLM-4 Plus', label: 'glm4Plus', icon: GlmIcon },
}

export const IMAGE_MODELS = {
Expand Down
29 changes: 29 additions & 0 deletions app/src/components/GlmIcon.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import Svg, { Path } from 'react-native-svg';

interface IGlmIcon {
  size: number
  theme: any
  selected: boolean
}

/**
 * GLM model icon, rendered as a square SVG of side `size`.
 * The glyph uses the theme's tint text color when the model is the
 * selected one, and the regular text color otherwise.
 */
export function GlmIcon({
  size,
  theme,
  selected,
  ...props
}: IGlmIcon) {
  const iconColor = selected ? theme.tintTextColor : theme.textColor
  return (
    <Svg {...props} width={size} height={size} viewBox="0 0 24 24">
      <Path
        fill={iconColor}
        d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 3c1.66 0 3 1.34 3 3s-1.34 3-3 3-3-1.34-3-3 1.34-3 3-3zm0 14.2c-2.5 0-4.71-1.28-6-3.22.03-1.99 4-3.08 6-3.08 1.99 0 5.97 1.09 6 3.08-1.29 1.94-3.5 3.22-6 3.22z"
      />
    </Svg>
  )
}
1 change: 1 addition & 0 deletions app/src/components/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,6 @@ export { Icon } from './Icon'
export { Header } from './Header'
export { AnthropicIcon } from './AnthropicIcon'
export { GeminiIcon } from './GeminiIcon'
export { GlmIcon } from './GlmIcon'
export { OpenAIIcon } from './OpenAIIcon'
export { ChatModelModal } from './ChatModelModal'
77 changes: 77 additions & 0 deletions app/src/screens/chat.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,8 @@ export function Chat() {
generateGptResponse()
} else if (chatType.label.includes('gemini')) {
generateGeminiResponse()
} else if (chatType.label.includes('glm')) {
generateGlmResponse()
}
}
async function generateGptResponse() {
Expand Down Expand Up @@ -307,6 +309,81 @@ export function Chat() {
es.addEventListener("error", listener)
}

async function generateGlmResponse() {
  // Streams a GLM chat completion over SSE, appending tokens to the
  // assistant field of the newest message as they arrive.
  if (!input) return
  Keyboard.dismiss()
  let localResponse = ''
  const modelLabel = chatType.label
  const currentState = getChatState(modelLabel)

  // NOTE: this is a growing array, not a 1-tuple — the previous
  // `as [{user: string, assistant?: string}]` assertion lied to the
  // type checker about its length.
  const messageArray: { user: string, assistant?: string }[] = [
    ...currentState.messages,
    { user: input },
  ]

  updateChatState(modelLabel, prev => ({
    ...prev,
    // Deep copy so state never shares references with messageArray,
    // which is mutated below as tokens stream in.
    messages: JSON.parse(JSON.stringify(messageArray))
  }))

  setLoading(true)
  // Defer the scroll until after the new user message has rendered.
  setTimeout(() => {
    scrollViewRef.current?.scrollToEnd({
      animated: true
    })
  }, 1)
  setInput('')

  const eventSourceArgs = {
    body: {
      prompt: input,
      model: chatType.label
    },
    type: getChatType(chatType),
  }

  const es = await getEventSource(eventSourceArgs)

  const listener = (event) => {
    if (event.type === "open") {
      console.log("Open SSE connection.")
      setLoading(false)
    } else if (event.type === "message") {
      if (event.data !== "[DONE]") {
        // Stop auto-scrolling once the reply gets long so the user can
        // scroll up without being yanked back to the bottom.
        if (localResponse.length < 850) {
          scrollViewRef.current?.scrollToEnd({
            animated: true
          })
        }
        // Server may emit either a plain string token or {content: string}.
        const data = JSON.parse(event.data)
        if (typeof data === 'string') {
          localResponse = localResponse + data
        } else if (data?.content) {
          localResponse = localResponse + data.content
        }
        messageArray[messageArray.length - 1].assistant = localResponse
        updateChatState(modelLabel, prev => ({
          ...prev,
          messages: JSON.parse(JSON.stringify(messageArray))
        }))
      } else {
        setLoading(false)
        es.close()
      }
    } else if (event.type === "error") {
      console.error("Connection error:", event.message)
      setLoading(false)
      // Close on error too — previously the connection leaked here.
      es.close()
    } else if (event.type === "exception") {
      console.error("Error:", event.message, event.error)
      setLoading(false)
      es.close()
    }
  }
  es.addEventListener("open", listener)
  es.addEventListener("message", listener)
  es.addEventListener("error", listener)
}

async function copyToClipboard(text) {
await Clipboard.setStringAsync(text)
}
Expand Down
3 changes: 3 additions & 0 deletions app/src/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -49,5 +49,8 @@ export function getChatType(type: Model) {
if (type.label.includes('gemini')) {
return 'gemini'
}
if (type.label.includes('glm')) {
return 'glm'
}
else return 'claude'
}
1 change: 1 addition & 0 deletions server/.env.example
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
ANTHROPIC_API_KEY=""
OPENAI_API_KEY=""
GEMINI_API_KEY=""
GLM_API_KEY=""
2 changes: 2 additions & 0 deletions server/src/chat/chatRouter.ts
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
import express from 'express'
import { claude } from './claude'
import { glm } from './glm'
import { gpt } from './gpt'
import { gemini } from './gemini'

const router = express.Router()

// One chat endpoint per model provider; each handler streams its response.
router.post('/claude', claude)
router.post('/glm', glm)
router.post('/gpt', gpt)
router.post('/gemini', gemini)

Expand Down
95 changes: 95 additions & 0 deletions server/src/chat/glm.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
import { Request, Response } from "express"
import asyncHandler from 'express-async-handler'

// Client-facing model label (sent by the app) and the corresponding
// identifier expected by the GLM API.
type ModelLabel = 'glm4Plus'
type ModelName = 'glm-4-plus'

const models: Record<ModelLabel, ModelName> = {
  glm4Plus: 'glm-4-plus',
}

// Shape of the POST body sent by the chat screen (prompt + model label).
interface RequestBody {
  prompt: string;
  model: ModelLabel;
}

export const glm = asyncHandler(async (req: Request, res: Response) => {
try {
res.writeHead(200, {
'Content-Type': 'text/event-stream',
'Connection': 'keep-alive',
'Cache-Control': 'no-cache'
})

const { prompt, model }: RequestBody = req.body
const selectedModel = models[model]

if (!selectedModel) {
res.write('data: [DONE]\n\n')
res.end()
return
}

const decoder = new TextDecoder()
const response = await fetch('https://open.bigmodel.cn/api/paas/v4/chat/completions', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${process.env.GLM_API_KEY || ''}`
},
body: JSON.stringify({
model: selectedModel,
messages: [{ role: 'user', content: prompt }],
stream: true
})
})

const reader = response.body?.getReader()
if (reader) {
let brokenLine = ''

while (true) {
const { done, value } = await reader.read()

if (done) {
break
}

let chunk = decoder.decode(value, {stream: true})

if (brokenLine) {
chunk = brokenLine + chunk
brokenLine = ''
}

const lines = chunk.split('\n')

for (const line of lines) {
const trimmed = line.trim()
if (!trimmed || !trimmed.startsWith('data: ')) continue
const data = trimmed.replace('data: ', '')
if (data === '[DONE]') continue

try {
const parsed = JSON.parse(data)
if (parsed.choices?.[0]?.delta?.content) {
res.write(`data: ${JSON.stringify(parsed.choices[0].delta)}\n\n`)
}
} catch {
brokenLine = line
}
Comment on lines +67 to +80
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🟡 SSE stream parsing silently drops messages when chunk boundary splits within the data: prefix

The SSE stream parser in glm.ts silently discards entire messages when a TCP/HTTP chunk boundary falls within the data: prefix of an SSE line.

Root Cause

The brokenLine recovery mechanism (lines 60-63, 78-80) only saves a line as brokenLine when JSON.parse fails in the catch block. However, if a chunk boundary splits within the data: prefix itself (e.g., chunk 1 ends with \ndat and chunk 2 starts with a: {"choices":[...]}), the partial line dat is trimmed and skipped at line 69 (continue) because it doesn't start with data: . Since continue bypasses the catch block, brokenLine is never set.

On the next chunk, the continuation a: {"choices":[...]} also doesn't start with data: and is similarly skipped. The entire SSE message is silently lost.

// Line 67-70: Partial lines that don't start with 'data: ' are skipped
// but NOT saved to brokenLine for reassembly
for (const line of lines) {
  const trimmed = line.trim()
  if (!trimmed || !trimmed.startsWith('data: ')) continue  // <-- lost here

Impact: Occasional dropped tokens in streamed responses. In practice this is rare since chunk boundaries typically align to newlines in SSE streams, but it can happen under network conditions that fragment TCP segments at unfortunate boundaries.

Prompt for agents
In server/src/chat/glm.ts, the brokenLine handling needs to account for incomplete lines that don't yet contain the full 'data: ' prefix. Instead of only setting brokenLine in the JSON.parse catch block, the last line of each chunk should always be checked for completeness. One approach: after the for loop (line 67-81), always check if the last line from the split didn't end with a newline (i.e., the original chunk didn't end with '\n'). If so, save it as brokenLine regardless of whether it starts with 'data: '. For example, after line 65 (`const lines = chunk.split('\n')`), check if the chunk ends with '\n' — if not, pop the last element from `lines` and save it as `brokenLine` before entering the for loop. This ensures partial lines are always preserved for reassembly with the next chunk.
Open in Devin Review

Was this helpful? React with 👍 or 👎 to provide feedback.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Acknowledged — this is a valid edge case but extremely unlikely in practice since SSE servers typically flush on line boundaries. The existing GPT and Claude handlers in this repo use similar patterns. Happy to apply the fix if the maintainer wants it, but leaving as-is for now to stay consistent with the rest of the codebase.

}
}

res.write('data: [DONE]\n\n')
res.end()
} else {
res.write('data: [DONE]\n\n')
res.end()
}
} catch (err) {
console.log('error in GLM chat: ', err)
res.write('data: [DONE]\n\n')
res.end()
}
})