diff --git a/app/constants.ts b/app/constants.ts
index 2b2463b..6c3bb9f 100644
--- a/app/constants.ts
+++ b/app/constants.ts
@@ -1,5 +1,6 @@
import { AnthropicIcon } from './src/components/AnthropicIcon'
import { GeminiIcon } from './src/components/GeminiIcon'
+import { GlmIcon } from './src/components/GlmIcon'
import { OpenAIIcon } from './src/components/OpenAIIcon'
const normalizeDomain = (value?: string) => {
@@ -41,6 +42,7 @@ export const MODELS = {
gpt52: { name: 'GPT 5.2', label: 'gpt52', icon: OpenAIIcon },
gpt5Mini: { name: 'GPT 5 Mini', label: 'gpt5Mini', icon: OpenAIIcon },
gemini: { name: 'Gemini', label: 'gemini', icon: GeminiIcon },
+ glm4Plus: { name: 'GLM-4 Plus', label: 'glm4Plus', icon: GlmIcon },
}
export const IMAGE_MODELS = {
diff --git a/app/src/components/GlmIcon.tsx b/app/src/components/GlmIcon.tsx
new file mode 100644
index 0000000..6079634
--- /dev/null
+++ b/app/src/components/GlmIcon.tsx
@@ -0,0 +1,22 @@
+import Svg, { Path } from 'react-native-svg';
+
+interface IGlmIcon {
+ size: number
+ theme: any
+ selected: boolean
+}
+
+export function GlmIcon({
+ size,
+ theme,
+ selected,
+ ...props
+}: IGlmIcon) {
+ const fill = selected ? theme.tintTextColor : theme.textColor
+ return (
+ <Svg width={size} height={size} viewBox="0 0 24 24" {...props}>
+ {/* Placeholder glyph (plain circle); the actual GLM logo path data is not part of this diff */}
+ <Path fill={fill} d="M12 2a10 10 0 1 1 0 20 10 10 0 1 1 0-20Z" />
+ </Svg>
+ )
+}
diff --git a/app/src/components/index.ts b/app/src/components/index.ts
index 87cc792..c6cae60 100644
--- a/app/src/components/index.ts
+++ b/app/src/components/index.ts
@@ -2,5 +2,6 @@ export { Icon } from './Icon'
export { Header } from './Header'
export { AnthropicIcon } from './AnthropicIcon'
export { GeminiIcon } from './GeminiIcon'
+export { GlmIcon } from './GlmIcon'
export { OpenAIIcon } from './OpenAIIcon'
export { ChatModelModal } from './ChatModelModal'
diff --git a/app/src/screens/chat.tsx b/app/src/screens/chat.tsx
index 13deec5..8ffbac8 100644
--- a/app/src/screens/chat.tsx
+++ b/app/src/screens/chat.tsx
@@ -67,6 +67,8 @@ export function Chat() {
generateGptResponse()
} else if (chatType.label.includes('gemini')) {
generateGeminiResponse()
+ } else if (chatType.label.includes('glm')) {
+ generateGlmResponse()
}
}
async function generateGptResponse() {
@@ -307,6 +309,84 @@
es.addEventListener("error", listener)
}
+ async function generateGlmResponse() {
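+ // Streams a GLM reply over SSE; mirrors the GPT and Gemini handlers above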
+ if (!input) return
+ Keyboard.dismiss()
+ let localResponse = ''
+ const modelLabel = chatType.label
+ const currentState = getChatState(modelLabel)
+
+ let messageArray = [
+ ...currentState.messages, {
+ user: input,
+ }
+ ] as [{user: string, assistant?: string}]
+
+ updateChatState(modelLabel, prev => ({
+ ...prev,
+ messages: JSON.parse(JSON.stringify(messageArray))
+ }))
+
+ setLoading(true)
+ setTimeout(() => {
+ scrollViewRef.current?.scrollToEnd({
+ animated: true
+ })
+ }, 1)
+ setInput('')
+
+ const eventSourceArgs = {
+ body: {
+ prompt: input,
+ model: chatType.label
+ },
+ type: getChatType(chatType),
+ }
+
+ const es = await getEventSource(eventSourceArgs)
+
+ const listener = (event) => {
+ if (event.type === "open") {
+ console.log("Open SSE connection.")
+ setLoading(false)
+ } else if (event.type === "message") {
+ if (event.data !== "[DONE]") {
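+ // Auto-scroll while the reply is still short; past ~850 chars leave scrolling to the user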
+ if (localResponse.length < 850) {
+ scrollViewRef.current?.scrollToEnd({
+ animated: true
+ })
+ }
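+ // The server sends either a plain string or an object with a content field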
+ const data = JSON.parse(event.data)
+ if (typeof data === 'string') {
+ localResponse = localResponse + data
+ } else if (data?.content) {
+ localResponse = localResponse + data.content
+ }
+ messageArray[messageArray.length - 1].assistant = localResponse
+ updateChatState(modelLabel, prev => ({
+ ...prev,
+ messages: JSON.parse(JSON.stringify(messageArray))
+ }))
+ } else {
+ setLoading(false)
+ es.close()
+ }
+ } else if (event.type === "error") {
+ console.error("Connection error:", event.message)
+ setLoading(false)
+ } else if (event.type === "exception") {
+ console.error("Error:", event.message, event.error)
+ setLoading(false)
+ }
+ }
+ es.addEventListener("open", listener)
+ es.addEventListener("message", listener)
+ es.addEventListener("error", listener)
+ }
+
async function copyToClipboard(text) {
await Clipboard.setStringAsync(text)
}
diff --git a/app/src/utils.ts b/app/src/utils.ts
index b8022cd..e69c2aa 100644
--- a/app/src/utils.ts
+++ b/app/src/utils.ts
@@ -49,5 +49,8 @@ export function getChatType(type: Model) {
if (type.label.includes('gemini')) {
return 'gemini'
}
+ if (type.label.includes('glm')) {
+ return 'glm'
+ }
else return 'claude'
}
diff --git a/server/.env.example b/server/.env.example
index ac97a9b..99f41a3 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -1,3 +1,4 @@
ANTHROPIC_API_KEY=""
OPENAI_API_KEY=""
GEMINI_API_KEY=""
+GLM_API_KEY=""
diff --git a/server/src/chat/chatRouter.ts b/server/src/chat/chatRouter.ts
index 6bb52b6..7920bd3 100644
--- a/server/src/chat/chatRouter.ts
+++ b/server/src/chat/chatRouter.ts
@@ -1,11 +1,13 @@
import express from 'express'
import { claude } from './claude'
+import { glm } from './glm'
import { gpt } from './gpt'
import { gemini } from './gemini'
const router = express.Router()
router.post('/claude', claude)
+router.post('/glm', glm)
router.post('/gpt', gpt)
router.post('/gemini', gemini)
diff --git a/server/src/chat/glm.ts b/server/src/chat/glm.ts
new file mode 100644
index 0000000..2bfb412
--- /dev/null
+++ b/server/src/chat/glm.ts
@@ -0,0 +1,103 @@
+import { Request, Response } from "express"
+import asyncHandler from 'express-async-handler'
+
+type ModelLabel = 'glm4Plus'
+type ModelName = 'glm-4-plus'
+
+const models: Record<ModelLabel, ModelName> = {
+ glm4Plus: 'glm-4-plus',
+}
+
+interface RequestBody {
+ prompt: string;
+ model: ModelLabel;
+}
+
+export const glm = asyncHandler(async (req: Request, res: Response) => {
+ try {
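+ // Respond as a Server-Sent Events stream so tokens reach the client as they arrive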
+ res.writeHead(200, {
+ 'Content-Type': 'text/event-stream',
+ 'Connection': 'keep-alive',
+ 'Cache-Control': 'no-cache'
+ })
+
+ const { prompt, model }: RequestBody = req.body
+ const selectedModel = models[model]
+
+ if (!selectedModel) {
+ res.write('data: [DONE]\n\n')
+ res.end()
+ return
+ }
+
+ const decoder = new TextDecoder()
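+ // Zhipu's OpenAI-compatible chat completions endpoint, authenticated with GLM_API_KEY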
+ const response = await fetch('https://open.bigmodel.cn/api/paas/v4/chat/completions', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${process.env.GLM_API_KEY || ''}`
+ },
+ body: JSON.stringify({
+ model: selectedModel,
+ messages: [{ role: 'user', content: prompt }],
+ stream: true
+ })
+ })
+
+ const reader = response.body?.getReader()
+ if (reader) {
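+ // Holds a line fragment carried over when a network read ends mid-line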
+ let brokenLine = ''
+
+ while (true) {
+ const { done, value } = await reader.read()
+
+ if (done) {
+ break
+ }
+
+ let chunk = decoder.decode(value, {stream: true})
+
+ if (brokenLine) {
+ chunk = brokenLine + chunk
+ brokenLine = ''
+ }
+
+ const lines = chunk.split('\n')
+
+ // A read can end mid-line; keep the unterminated tail for the next chunk
+ if (!chunk.endsWith('\n')) {
+ brokenLine = lines.pop() ?? ''
+ }
+
+ for (const line of lines) {
+ const trimmed = line.trim()
+ if (!trimmed || !trimmed.startsWith('data: ')) continue
+ const data = trimmed.replace('data: ', '')
+ if (data === '[DONE]') continue
+
+ try {
+ const parsed = JSON.parse(data)
+ if (parsed.choices?.[0]?.delta?.content) {
+ res.write(`data: ${JSON.stringify(parsed.choices[0].delta)}\n\n`)
+ }
+ } catch {
+ // Malformed payload; skip it (split lines are already reassembled above)
+ }
+ }
+ }
+
+ res.write('data: [DONE]\n\n')
+ res.end()
+ } else {
+ res.write('data: [DONE]\n\n')
+ res.end()
+ }
+ } catch (err) {
+ console.log('error in GLM chat: ', err)
+ res.write('data: [DONE]\n\n')
+ res.end()
+ }
+})