Skip to content

Commit 466dfb3

Browse files
committed
加入DeepSeek支持
1 parent eece834 commit 466dfb3

File tree

7 files changed

+64
-8
lines changed

7 files changed

+64
-8
lines changed

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
<img alt="Telegram group" src="https://img.shields.io/badge/Telegram-Group-blue.svg?logo=telegram" />
2323
</a>
2424
<p>
25-
支持 GPT-4 · 基于文件问答 · LLM本地部署 · 联网搜索 · Agent 助理 · 支持 Fine-tune
25+
支持 DeepSeek R1 & GPT 4 · 基于文件问答 · LLM本地部署 · 联网搜索 · Agent 助理 · 支持 Fine-tune
2626
</p>
2727
<a href="https://www.bilibili.com/video/BV1mo4y1r7eE"><strong>视频教程</strong></a>
2828
·
@@ -71,7 +71,7 @@
7171
| [Google Gemini Pro](https://ai.google.dev/gemini-api/docs/api-key?hl=zh-cn) | | [StableLM](https://github.com/Stability-AI/StableLM) ||
7272
| [讯飞星火认知大模型](https://xinghuo.xfyun.cn) | | [MOSS](https://github.com/OpenLMLab/MOSS) ||
7373
| [Inspur Yuan 1.0](https://air.inspur.com/home) | | [通义千问](https://github.com/QwenLM/Qwen/tree/main) ||
74-
| [MiniMax](https://api.minimax.chat/) ||||
74+
| [MiniMax](https://api.minimax.chat/) | | [DeepSeek](https://platform.deepseek.com) ||
7575
| [XMChat](https://github.com/MILVLG/xmchat) | 不支持流式传输|||
7676
| [Midjourney](https://www.midjourney.com/) | 不支持流式传输|||
7777
| [Claude](https://www.anthropic.com/) | ✨ 现已支持Claude 3 Opus、Sonnet,Haiku将会在推出后的第一时间支持|||

config_example.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33

44
//== API 配置 ==
55
"openai_api_key": "", // 你的 OpenAI API Key,一般必填,若空缺则需在图形界面中填入API Key
6+
"deepseek_api_key": "", // 你的 DeepSeek API Key,用于 DeepSeek Chat 和 Reasoner(R1) 对话模型
67
"google_genai_api_key": "", // 你的 Google Gemini API Key ,用于 Google Gemini 对话模型
78
"google_genai_api_host": "generativelanguage.googleapis.com", // 你的 Google Gemini API Host 地址,一般无需更改
89
"xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型

modules/config.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,9 @@ def load_config_to_environ(key_list):
142142
xmchat_api_key = config.get("xmchat_api_key", "")
143143
os.environ["XMCHAT_API_KEY"] = xmchat_api_key
144144

145+
deepseek_api_key = config.get("deepseek_api_key", "")
146+
os.environ["DEEPSEEK_API_KEY"] = deepseek_api_key
147+
145148
minimax_api_key = config.get("minimax_api_key", "")
146149
os.environ["MINIMAX_API_KEY"] = minimax_api_key
147150
minimax_group_id = config.get("minimax_group_id", "")

modules/models/OpenAIVision.py

Lines changed: 29 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
import colorama
1010
import requests
1111
from io import BytesIO
12-
import uuid
12+
import time
1313

1414
import requests
1515
from PIL import Image
@@ -47,9 +47,27 @@ def get_answer_stream_iter(self):
4747
if response is not None:
4848
iter = self._decode_chat_response(response)
4949
partial_text = ""
50-
for i in iter:
51-
partial_text += i
52-
yield partial_text
50+
reasoning_text = ""
51+
reasoning_start_time = None
52+
53+
for content_delta, reasoning_delta in iter:
54+
if content_delta:
55+
partial_text += content_delta
56+
57+
if reasoning_delta:
58+
if reasoning_start_time is None:
59+
reasoning_start_time = time.time()
60+
elapsed_seconds = 0
61+
reasoning_text += reasoning_delta
62+
if reasoning_text:
63+
if reasoning_delta:
64+
elapsed_seconds = int(time.time() - reasoning_start_time)
65+
reasoning_preview = reasoning_text[-20:].replace("\n", "")
66+
yield f'<details open>\n<summary>Thinking ({elapsed_seconds}s)</summary>\n{reasoning_text}</details>\n\n' + partial_text
67+
else:
68+
yield f'<details>\n<summary>Thought for {elapsed_seconds} s</summary>\n{reasoning_text}</details>\n\n' + partial_text
69+
else:
70+
yield partial_text
5371
else:
5472
yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG
5573

@@ -233,6 +251,8 @@ def _decode_chat_response(self, response):
233251
for chunk in response.iter_lines():
234252
if chunk:
235253
chunk = chunk.decode()
254+
if chunk == ": keep-alive":
255+
continue
236256
chunk_length = len(chunk)
237257
try:
238258
chunk = json.loads(chunk[6:])
@@ -251,7 +271,11 @@ def _decode_chat_response(self, response):
251271
if finish_reason == "stop":
252272
break
253273
try:
254-
yield chunk["choices"][0]["delta"]["content"]
274+
if "reasoning_content" in chunk["choices"][0]["delta"]:
275+
reasoning_content = chunk["choices"][0]["delta"]["reasoning_content"]
276+
else:
277+
reasoning_content = None
278+
yield chunk["choices"][0]["delta"]["content"], reasoning_content
255279
except Exception as e:
256280
# logging.error(f"Error: {e}")
257281
continue

modules/models/base_model.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -160,6 +160,7 @@ class ModelType(Enum):
160160
GoogleGemma = 20
161161
Ollama = 21
162162
Groq = 22
163+
DeepSeek = 23
163164

164165
@classmethod
165166
def get_type(cls, model_name: str):
@@ -224,6 +225,8 @@ def get_type(cls, model_name: str):
224225
model_type = ModelType.DALLE3
225226
elif "gemma" in model_name_lower:
226227
model_type = ModelType.GoogleGemma
228+
elif "deepseek" in model_name_lower:
229+
model_type = ModelType.DeepSeek
227230
else:
228231
model_type = ModelType.LLaMA
229232
return model_type
@@ -1053,7 +1056,7 @@ def load_chat_history(self, new_history_file_path=None):
10531056
logging.info(f"Trimmed history: {saved_json['history']}")
10541057

10551058
# Sanitize chatbot
1056-
saved_json["chatbot"] = remove_html_tags(saved_json["chatbot"])
1059+
saved_json["chatbot"] = saved_json["chatbot"]
10571060
logging.debug(f"{self.user_name} 加载对话历史完毕")
10581061
self.history = saved_json["history"]
10591062
self.single_turn = saved_json.get("single_turn", self.single_turn)

modules/models/models.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,13 @@ def get_model(
4040
access_key = os.environ.get("OPENAI_API_KEY", access_key)
4141
model = OpenAIVisionClient(
4242
model_name, api_key=access_key, user_name=user_name)
43+
elif model_type == ModelType.DeepSeek:
44+
logging.info(f"正在加载 DeepSeek 模型: {model_name}")
45+
from .OpenAIVision import OpenAIVisionClient
46+
access_key = os.environ.get("DEEPSEEK_API_KEY", access_key)
47+
logging.info(access_key)
48+
model = OpenAIVisionClient(
49+
model_name, api_key=access_key, user_name=user_name)
4350
elif model_type == ModelType.OpenAIInstruct:
4451
logging.info(f"正在加载OpenAI Instruct模型: {model_name}")
4552
from .OpenAIInstruct import OpenAI_Instruct_Client

modules/presets.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,8 @@
6464
"Claude 3 Haiku",
6565
"Claude 3.5 Sonnet",
6666
"Claude 3 Opus",
67+
"DeepSeek Chat",
68+
"DeepSeek R1",
6769
"川虎助理",
6870
"川虎助理 Pro",
6971
"DALL-E 3",
@@ -504,6 +506,22 @@
504506
"path": "/v1.1/chat",
505507
"domain": "general"
506508
}
509+
},
510+
"DeepSeek Chat": {
511+
"model_name": "deepseek-chat",
512+
"api_host": "https://api.deepseek.com/v1",
513+
"description": "DeepSeek V3 Chat",
514+
"token_limit": 64000,
515+
"multimodal": False,
516+
"model_type": "DeepSeek"
517+
},
518+
"DeepSeek R1": {
519+
"model_name": "deepseek-reasoner",
520+
"api_host": "https://api.deepseek.com/v1",
521+
"description": "DeepSeek V3 Chat",
522+
"token_limit": 64000,
523+
"multimodal": False,
524+
"model_type": "DeepSeek"
507525
}
508526
}
509527

0 commit comments

Comments
 (0)