Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -38,3 +38,7 @@ docker.md
.DS_Store
**/.DS_Store
*.pem

# 方便在本地放一些不用上传给 github 的文档,只需要在本地建立一个 ignore 文件夹,然后把需要忽略的文件放进去就行了
ignore
logs
67 changes: 43 additions & 24 deletions code/config.example.yaml
Original file line number Diff line number Diff line change
@@ -1,21 +1,27 @@
# 飞书
BASE_URL: https://open.feishu.cn
APP_ID: cli_axxx
APP_SECRET: xxx
APP_ENCRYPT_KEY: xxx
APP_VERIFICATION_TOKEN: xxx
## 日志配置
ENABLE_LOG: true

## 飞书
# https://open.feishu.cn
BASE_URL:
APP_ID: XXXXXXXXXXXXXXXXXXXX
APP_SECRET: XXXXXXXXXXXXXXXXXXXX
APP_ENCRYPT_KEY: XXXXXXXXXXXXXXXXXXXX
APP_VERIFICATION_TOKEN: XXXXXXXXXXXXXXXXXXXX
# 请确保和飞书应用管理平台中的设置一致
BOT_NAME: chatGpt
# openAI key 支持负载均衡 可以填写多个key 用逗号分隔
OPENAI_KEY: sk-xxx,sk-xxx,sk-xxx
# openAI model 指定模型,默认为 gpt-3.5-turbo
# 可选参数有:"gpt-4-1106-preview", "gpt-4-32K","gpt-4","gpt-3.5-turbo-16k", "gpt-3.5-turbo","gpt-3.5-turbo-16k","gpt-3.5-turbo-1106" 等
# 如果使用gpt-4,请确认自己是否有接口调用白名单
OPENAI_MODEL: gpt-3.5-turbo
BOT_NAME: XXXXXXXXXXXXXXXXXXXX

## OpenAI
# 支持负载均衡 可以填写多个key 用逗号分隔
OPENAI_KEY: XXXXXXXXXXXXXXXXXXXX
# openai 指定模型, 更多见 https://platform.openai.com/docs/models/model-endpoint-compatibility 中 /v1/chat/completions
# gpt-4 gpt-3.5-turbo
OPENAI_MODEL: gpt-4
# openAI 最大token数 默认为2000
OPENAI_MAX_TOKENS: 2000
# 响应超时时间,单位为毫秒,默认为550毫秒
OPENAI_HTTP_CLIENT_TIMEOUT: 550
OPENAI_MAX_TOKENS:
# 访问OpenAi的 普通 Http请求的超时时间,单位秒,默认为 550 秒
OPENAI_HTTP_CLIENT_TIMEOUT:

# 服务器配置
HTTP_PORT: 9000
HTTPS_PORT: 9001
Expand All @@ -25,13 +31,26 @@ KEY_FILE: key.pem
# openai 地址, 一般不需要修改, 除非你有自己的反向代理
API_URL: https://api.openai.com
# 代理设置, 例如 "http://127.0.0.1:7890", ""代表不使用代理
HTTP_PROXY: ""
HTTP_PROXY:

# 是否开启流式接口返回
STREAM_MODE: false # set true to use stream mode
# AZURE OPENAI
AZURE_ON: false # set true to use Azure rather than OpenAI
AZURE_API_VERSION: 2023-03-15-preview # 2023-03-15-preview or 2022-12-01 refer https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#completions
AZURE_RESOURCE_NAME: xxxx # you can find in endpoint url. Usually looks like https://{RESOURCE_NAME}.openai.azure.com
AZURE_DEPLOYMENT_NAME: xxxx # usually looks like ...openai.azure.com/openai/deployments/{DEPLOYMENT_NAME}/chat/completions.
AZURE_OPENAI_TOKEN: xxxx # Authentication key. We can use Azure Active Directory Authentication(TBD).
STREAM_MODE: true


## AZURE OPENAI
# 是否启用 AZURE,如果开启的话,将不会启用 OpenAI 的相关功能。
AZURE_ON: false
# 2023-03-15-preview or 2022-12-01 refer https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#completions
AZURE_API_VERSION: 2023-03-15-preview
# you can find in endpoint url. Usually looks like https://{RESOURCE_NAME}.openai.azure.com
AZURE_RESOURCE_NAME: xxxx
# usually looks like ...openai.azure.com/openai/deployments/{DEPLOYMENT_NAME}/chat/completions.
AZURE_DEPLOYMENT_NAME: xxxx
# Authentication key. We can use Azure Active Directory Authentication(TBD).
AZURE_OPENAI_TOKEN: xxxx

## 访问控制
# 是否启用访问控制。默认不启用。
ACCESS_CONTROL_ENABLE: false
# 每个用户每天最多问多少个问题。默认为不限制. 配置成为小于等于0表示不限制。
ACCESS_CONTROL_MAX_COUNT_PER_USER_PER_DAY: 0
1 change: 1 addition & 0 deletions code/go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ require (
golang.org/x/text v0.8.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

Expand Down
2 changes: 2 additions & 0 deletions code/go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -575,6 +575,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
Expand Down
1 change: 1 addition & 0 deletions code/handlers/event_common_action.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ type MsgInfo struct {
msgType string
msgId *string
chatId *string
userId string
qParsed string
fileKey string
imageKey string
Expand Down
74 changes: 61 additions & 13 deletions code/handlers/event_msg_action.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,11 @@ import (
"encoding/json"
"fmt"
"log"
"start-feishubot/initialization"
"start-feishubot/services/accesscontrol"
"start-feishubot/services/openai"
"strings"
"time"

"start-feishubot/services/openai"
)

func setDefaultPrompt(msg []openai.Messages) []openai.Messages {
Expand Down Expand Up @@ -44,6 +45,11 @@ type MessageAction struct { /*消息*/
}

func (*MessageAction) Execute(a *ActionInfo) bool {

if !allowAccess(a) {
return false
}

if a.handler.config.StreamMode {
return true
}
Expand All @@ -66,6 +72,8 @@ func (*MessageAction) Execute(a *ActionInfo) bool {
}
msg = append(msg, completions)
a.handler.sessionCache.SetMsg(*a.info.sessionId, msg)

printMessage(a, err, msg, completions.Content)
//if new topic
if len(msg) == 3 {
//fmt.Println("new topic", msg[1].Content)
Expand All @@ -87,7 +95,7 @@ func (*MessageAction) Execute(a *ActionInfo) bool {
return true
}

//判断msg中的是否包含system role
// 判断msg中的是否包含system role
func hasSystemRole(msg []openai.Messages) bool {
for _, m := range msg {
if m.Role == "system" {
Expand All @@ -101,6 +109,17 @@ type StreamMessageAction struct { /*消息*/
}

func (m *StreamMessageAction) Execute(a *ActionInfo) bool {

if !allowAccess(a) {
return false
}

//s := "快速响应,用于测试访问控制: " + time.Now().String() +
// " accesscontrol.currentDate " + accesscontrol.GetCurrentDateFlag()
//_ = sendMsg(*a.ctx, s, a.info.chatId)
//log.Println(s)
//return false

if !a.handler.config.StreamMode {
return true
}
Expand Down Expand Up @@ -142,6 +161,7 @@ func (m *StreamMessageAction) Execute(a *ActionInfo) bool {
if err := recover(); err != nil {
err := updateFinalCard(*a.ctx, "聊天失败", cardId, ifNewTopic)
if err != nil {
printErrorMessage(a, msg, err)
return
}
}
Expand All @@ -151,10 +171,10 @@ func (m *StreamMessageAction) Execute(a *ActionInfo) bool {
aiMode := a.handler.sessionCache.GetAIMode(*a.info.sessionId)
//fmt.Println("msg: ", msg)
//fmt.Println("aiMode: ", aiMode)
if err := a.handler.gpt.StreamChat(*a.ctx, msg, aiMode,
chatResponseStream); err != nil {
if err := a.handler.gpt.StreamChat(*a.ctx, msg, aiMode, chatResponseStream); err != nil {
err := updateFinalCard(*a.ctx, "聊天失败", cardId, ifNewTopic)
if err != nil {
printErrorMessage(a, msg, err)
return
}
close(done) // 关闭 done 信号
Expand All @@ -172,6 +192,7 @@ func (m *StreamMessageAction) Execute(a *ActionInfo) bool {
case <-ticker.C:
err := updateTextCard(*a.ctx, answer, cardId, ifNewTopic)
if err != nil {
printErrorMessage(a, msg, err)
return
}
}
Expand All @@ -189,6 +210,7 @@ func (m *StreamMessageAction) Execute(a *ActionInfo) bool {
case <-done: // 添加 done 信号的处理
err := updateFinalCard(*a.ctx, answer, cardId, ifNewTopic)
if err != nil {
printErrorMessage(a, msg, err)
return false
}
ticker.Stop()
Expand All @@ -197,19 +219,41 @@ func (m *StreamMessageAction) Execute(a *ActionInfo) bool {
})
a.handler.sessionCache.SetMsg(*a.info.sessionId, msg)
close(chatResponseStream)
log.Printf("\n\n\n")
jsonByteArray, err := json.Marshal(msg)
if err != nil {
log.Println(err)
}
jsonStr := strings.ReplaceAll(string(jsonByteArray), "\\n", "")
jsonStr = strings.ReplaceAll(jsonStr, "\n", "")
log.Printf("\n\n\n")
printMessage(a, err, msg, answer)
return false
}
}
}

// printMessage logs one successful chat round-trip — the full request message
// history plus the model's answer — as a single log line keyed by the
// requesting user's id. Newlines (escaped and literal) are stripped so the
// entry never spans multiple log lines.
//
// NOTE(review): the incoming err parameter is never read; the original code
// immediately shadowed it with json.Marshal's error. It is kept in the
// signature for caller compatibility.
func printMessage(a *ActionInfo, err error, msg []openai.Messages, answer string) {
	jsonByteArray, marshalErr := json.Marshal(msg)
	if marshalErr != nil {
		// Marshaling failed, so jsonByteArray is nil here — log the actual
		// error instead of an empty payload (the original logged the nil slice).
		log.Printf("Error marshaling JSON request: UserId: %s , Err: %v , Response: %s",
			a.info.userId, marshalErr, answer)
		return
	}
	// Collapse both escaped ("\\n") and literal newlines to keep the log entry flat.
	requestString := strings.ReplaceAll(string(jsonByteArray), "\\n", "")
	requestString = strings.ReplaceAll(requestString, "\n", "")
	answer = strings.ReplaceAll(answer, "\\n", "")
	answer = strings.ReplaceAll(answer, "\n", "")

	log.Printf("Success request plain requestString: UserId: %s , Request: %s , Response: %s",
		a.info.userId, requestString, answer)
}

// allowAccess reports whether the current user's request should be served.
// When access control is enabled in the config, it consumes one unit of the
// user's daily quota; if the quota is exhausted it notifies the user in chat
// and returns false. With access control disabled it always returns true.
func allowAccess(a *ActionInfo) bool {
	if !initialization.GetConfig().AccessControlEnable {
		// Access control is off: every request passes.
		return true
	}
	if accesscontrol.CheckAllowAccessThenIncrement(&a.info.userId) {
		// Still within today's quota (counter incremented by the check).
		return true
	}
	// Quota exhausted: tell the user why the request is dropped.
	// Send errors are deliberately ignored — the denial stands either way.
	notice := fmt.Sprintf("UserId: 【%s】 has accessed max count today! Max access count today %s: 【%d】",
		a.info.userId, accesscontrol.GetCurrentDateFlag(), initialization.GetConfig().AccessControlMaxCountPerUserPerDay)
	_ = sendMsg(*a.ctx, notice, a.info.chatId)
	return false
}

func sendOnProcess(a *ActionInfo, ifNewTopic bool) (*string, error) {
// send 正在处理中
cardId, err := sendOnProcessCard(*a.ctx, a.info.sessionId,
Expand All @@ -220,3 +264,7 @@ func sendOnProcess(a *ActionInfo, ifNewTopic bool) (*string, error) {
return cardId, nil

}

// printErrorMessage logs a failed chat request together with the message
// history that triggered it, keyed by the requesting user's id.
func printErrorMessage(a *ActionInfo, msg []openai.Messages, err error) {
	// Use %v for the struct slice and the error: %s on a []openai.Messages
	// whose elements lack a String method would emit %!s(...) noise, and %v
	// also renders a nil error safely as "<nil>".
	log.Printf("Failed request: UserId: %s , Request: %v , Err: %v", a.info.userId, msg, err)
}
1 change: 1 addition & 0 deletions code/handlers/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ func (m MessageHandler) msgReceivedHandler(ctx context.Context, event *larkim.P2
handlerType: handlerType,
msgType: msgType,
msgId: msgId,
userId: *event.Event.Sender.SenderId.UserId,
chatId: chatId,
qParsed: strings.Trim(parseContent(*content, msgType), " "),
fileKey: parseFileKey(*content),
Expand Down
Loading