Skip to content

Commit

Permalink
🐛 Bug
Browse files Browse the repository at this point in the history
1. Fixed the logger repeatedly outputting `httpx.RemoteProtocolError: Server disconnected without sending a response.`

2. Fixed the incorrect assignment of the Claude Max token.

⚙️ Dependencies

1. Upgraded the python-telegram-bot library to 21.0.1
  • Loading branch information
yym68686 committed Mar 17, 2024
1 parent 44f4f2d commit 318efb2
Show file tree
Hide file tree
Showing 9 changed files with 89 additions and 28 deletions.
34 changes: 25 additions & 9 deletions bot.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,17 +18,31 @@


logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger()

# 获取 httpx 的 logger
httpx_logger = logging.getLogger("httpx")
# 设置 httpx 的日志级别为 WARNING
httpx_logger.setLevel(logging.WARNING)
httpx_logger.setLevel(logging.CRITICAL)

httpx_logger = logging.getLogger("chromadb.telemetry.posthog")
httpx_logger.setLevel(logging.WARNING)

class SpecificStringFilter(logging.Filter):
    """Logging filter that drops any record whose message contains a given substring."""

    def __init__(self, specific_string):
        super().__init__()
        # Substring to suppress; records containing it are rejected.
        self.specific_string = specific_string

    def filter(self, record):
        # Returning False tells the logging machinery to drop the record.
        return self.specific_string not in record.getMessage()

# Silence the harmless-but-noisy disconnect error emitted via telegram/httpx.
specific_string = "httpx.RemoteProtocolError: Server disconnected without sending a response."
my_filter = SpecificStringFilter(specific_string)

updater_logger = logging.getLogger("telegram.ext.Updater")
updater_logger.addFilter(my_filter)
# BUG FIX: logging.getLogger("root") creates a brand-new logger merely *named*
# "root"; the actual root logger is obtained by calling getLogger() with no
# argument, so the filter previously never applied to root-level records.
root_logger = logging.getLogger()
root_logger.addFilter(my_filter)


botNick = config.NICK.lower() if config.NICK else None
botNicKLength = len(botNick) if botNick else 0
print("nick:", botNick)
Expand Down Expand Up @@ -90,7 +104,7 @@ async def command_bot(update, context, language=None, prompt=translator_prompt,
prompt = translator_en2zh_prompt
message = prompt + message
if message:
if "claude-2" in config.GPT_ENGINE and config.ClaudeAPI:
if "claude-2.1" in config.GPT_ENGINE and config.ClaudeAPI:
robot = config.claudeBot
if "claude-3" in config.GPT_ENGINE and config.ClaudeAPI:
robot = config.claude3Bot
Expand Down Expand Up @@ -277,7 +291,7 @@ async def delete_message(update, context, messageid, delay=10):
InlineKeyboardButton("claude-3-sonnet", callback_data="claude-3-sonnet-20240229"),
],
[
InlineKeyboardButton("claude-2", callback_data="claude-2"),
InlineKeyboardButton("claude-2.1", callback_data="claude-2.1"),
],
[
InlineKeyboardButton("gpt-4-0125-preview", callback_data="gpt-4-0125-preview"),
Expand Down Expand Up @@ -356,7 +370,7 @@ async def button_press(update, context):
if (config.API and "gpt-" in data) or (config.API and not config.ClaudeAPI):
config.ChatGPTbot = GPT(api_key=f"{config.API}", engine=config.GPT_ENGINE, system_prompt=config.systemprompt, temperature=config.temperature)
config.ChatGPTbot.reset(convo_id=str(update.effective_chat.id), system_prompt=config.systemprompt)
if config.ClaudeAPI and "claude-2" in data:
if config.ClaudeAPI and "claude-2.1" in data:
config.claudeBot = claudebot(api_key=f"{config.ClaudeAPI}", engine=config.GPT_ENGINE, system_prompt=config.systemprompt, temperature=config.temperature)
if config.ClaudeAPI and "claude-3" in data:
config.claudeBot = claudebot(api_key=f"{config.ClaudeAPI}", engine=config.GPT_ENGINE, system_prompt=config.systemprompt, temperature=config.temperature)
Expand Down Expand Up @@ -436,7 +450,7 @@ async def handle_pdf(update, context):
file_url = new_file.file_path
extracted_text_with_prompt = Document_extract(file_url)
# print(extracted_text_with_prompt)
if config.ClaudeAPI and "claude-2" in config.GPT_ENGINE:
if config.ClaudeAPI and "claude-2.1" in config.GPT_ENGINE:
robot = config.claudeBot
role = "Human"
else:
Expand Down Expand Up @@ -503,6 +517,7 @@ async def start(update, context): # 当用户输入/start时,返回文本
await update.message.reply_text(escape(message), parse_mode='MarkdownV2', disable_web_page_preview=True)

async def error(update, context):
# if str(context.error) == "httpx.RemoteProtocolError: Server disconnected without sending a response.": return
logger.warning('Update "%s" caused error "%s"', update, context.error)
traceback_string = traceback.format_exception(None, context.error, context.error.__traceback__)
logger.warning('Error traceback: %s', ''.join(traceback_string))
Expand Down Expand Up @@ -531,9 +546,10 @@ async def post_init(application: Application) -> None:
ApplicationBuilder()
.token(BOT_TOKEN)
.concurrent_updates(True)
.read_timeout(10)
.connection_pool_size(50000)
.read_timeout(600)
.pool_timeout(1200.0)
.get_updates_read_timeout(600)
.rate_limiter(AIORateLimiter(max_retries=5))
.post_init(post_init)
.build()
Expand Down
10 changes: 10 additions & 0 deletions config.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,16 @@
"VERSION": True,
}

class userConfig:
    """Per-user runtime settings, seeded from the module-level defaults."""

    def __init__(self, user_id: int):
        # Identity of the Telegram user this config belongs to.
        self.user_id = user_id
        # Generation defaults inherited from the global configuration.
        self.engine = GPT_ENGINE
        self.temperature = temperature
        self.system_prompt = systemprompt
        self.language = LANGUAGE
        # Web-search settings; the search prompt is localized to the user's language.
        self.search_model = "gpt-3.5-turbo-1106"
        self.search_system_prompt = prompt.search_system_prompt.format(self.language)

class openaiAPI:
def __init__(
self,
Expand Down
3 changes: 2 additions & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,8 @@ md2tgmd
# jieba
python-dotenv
beautifulsoup4
python-telegram-bot[webhooks,rate-limiter]==20.6
python-telegram-bot[webhooks,rate-limiter]==21.0.1
# python-telegram-bot[webhooks,rate-limiter]==20.6

# langchain
# chromadb
Expand Down
2 changes: 1 addition & 1 deletion test/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
# else 14500
# if "gpt-3.5-turbo-16k" in engine or "gpt-3.5-turbo-1106" in engine
# else 98500
# if ("claude-2-web" or "claude-2") in engine
# if ("claude-2-web" or "claude-2.1") in engine
# else 3400
# )

Expand Down
8 changes: 4 additions & 4 deletions test/test_claude.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ class claudebot:
def __init__(
self,
api_key: str,
engine: str = os.environ.get("GPT_ENGINE") or "claude-2",
engine: str = os.environ.get("GPT_ENGINE") or "claude-2.1",
temperature: float = 0.5,
top_p: float = 0.7,
chat_url: str = "https://api.anthropic.com/v1/complete",
Expand Down Expand Up @@ -110,7 +110,7 @@ def get_token_count(self, convo_id: str = "default") -> int:
f"Engine {self.engine} is not supported. Select from {ENGINES}",
)
tiktoken.get_encoding("cl100k_base")
tiktoken.model.MODEL_TO_ENCODING["claude-2"] = "cl100k_base"
tiktoken.model.MODEL_TO_ENCODING["claude-2.1"] = "cl100k_base"

encoding = tiktoken.encoding_for_model(self.engine)

Expand Down Expand Up @@ -145,8 +145,8 @@ def ask_stream(
url = self.chat_url
headers = {
"accept": "application/json",
"anthropic-version": "2023-06-01",
"content-type": "application/json",
"anthropic-version": "2023-06-01",
"content-type": "application/json",
"x-api-key": f"{kwargs.get('api_key', self.api_key)}",
}

Expand Down
2 changes: 1 addition & 1 deletion test/test_claude3.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def get_token_count(self, convo_id: str = "default") -> int:
raise NotImplementedError(
f"Engine {self.engine} is not supported. Select from {ENGINES}",
)
tiktoken.model.MODEL_TO_ENCODING["claude-2"] = "cl100k_base"
tiktoken.model.MODEL_TO_ENCODING["claude-2.1"] = "cl100k_base"
encoding = tiktoken.encoding_for_model(self.engine)

num_tokens = 0
Expand Down
32 changes: 32 additions & 0 deletions test/test_logging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
import logging

class SpecificStringFilter(logging.Filter):
    """Filter that rejects any log record containing a given substring."""

    def __init__(self, specific_string):
        super().__init__()
        self.specific_string = specific_string

    def filter(self, record):
        # False -> the record is dropped before it reaches the handler output.
        return self.specific_string not in record.getMessage()

# Build the filter that suppresses the noisy httpx disconnect error.
specific_string = "httpx.RemoteProtocolError: Server disconnected without sending a response."
my_filter = SpecificStringFilter(specific_string)

# Logger under test, accepting everything from DEBUG up.
logger = logging.getLogger('my_logger')
logger.setLevel(logging.DEBUG)

# Console handler; its level is left at default so it emits whatever the
# logger passes through.
ch = logging.StreamHandler()

# Attach the filter to the handler, then the handler to the logger.
ch.addFilter(my_filter)
logger.addHandler(ch)

# Demo: the middle message matches the filter string and is suppressed.
logger.debug("This is a debug message.")
logger.error("This message will be ignored: ignore me.httpx.RemoteProtocolError: Server disconnected without sending a response.")
logger.info("Another info message.")
24 changes: 13 additions & 11 deletions utils/chatgpt2api.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def get_filtered_keys_from_object(obj: object, *keys: str) -> Set[str]:
"gpt-4-vision-preview",
"mixtral-8x7b-32768",
"llama2-70b-4096",
"claude-2",
"claude-2.1",
"claude-3-sonnet-20240229",
"claude-3-opus-20240229",
]
Expand All @@ -71,7 +71,7 @@ class claudebot:
def __init__(
self,
api_key: str,
engine: str = os.environ.get("GPT_ENGINE") or "claude-2",
engine: str = os.environ.get("GPT_ENGINE") or "claude-2.1",
temperature: float = 0.5,
top_p: float = 0.7,
chat_url: str = "https://api.anthropic.com/v1/complete",
Expand Down Expand Up @@ -132,7 +132,7 @@ def get_token_count(self, convo_id: str = "default") -> int:
raise NotImplementedError(
f"Engine {self.engine} is not supported. Select from {ENGINES}",
)
tiktoken.model.MODEL_TO_ENCODING["claude-2"] = "cl100k_base"
tiktoken.model.MODEL_TO_ENCODING["claude-2.1"] = "cl100k_base"
encoding = tiktoken.encoding_for_model(self.engine)

num_tokens = 0
Expand Down Expand Up @@ -274,7 +274,7 @@ def get_token_count(self, convo_id: str = "default") -> int:
raise NotImplementedError(
f"Engine {self.engine} is not supported. Select from {ENGINES}",
)
tiktoken.model.MODEL_TO_ENCODING["claude-2"] = "cl100k_base"
tiktoken.model.MODEL_TO_ENCODING["claude-2.1"] = "cl100k_base"
encoding = tiktoken.encoding_for_model(self.engine)

num_tokens = 0
Expand Down Expand Up @@ -311,6 +311,8 @@ def ask_stream(
"x-api-key": f"{kwargs.get('api_key', self.api_key)}",
"anthropic-version": "2023-06-01",
"content-type": "application/json",
"accept": "application/json"
# "Accept": "*/*"
}

json_post = {
Expand Down Expand Up @@ -425,15 +427,15 @@ def __init__(
self.system_prompt: str = system_prompt
self.max_tokens: int = max_tokens or (
4096
if "gpt-4-1106-preview" in engine or "gpt-4-0125-preview" in engine or "gpt-4-turbo-preview" in engine or "gpt-3.5-turbo-1106" in engine or self.engine == "gpt-4-vision-preview"
if "gpt-4-1106-preview" in engine or "gpt-4-0125-preview" in engine or "gpt-4-turbo-preview" in engine or "gpt-3.5-turbo-1106" in engine or self.engine == "gpt-4-vision-preview" or "claude" in engine
else 31000
if "gpt-4-32k" in engine
else 7000
if "gpt-4" in engine
else 16385
if "gpt-3.5-turbo-16k" in engine
else 99000
if "claude-2-web" in engine or "claude-2" in engine
# else 99000
# if "claude-2.1" in engine
else 4000
)
# context max tokens
Expand All @@ -448,7 +450,7 @@ def __init__(
else 14500
if "gpt-3.5-turbo-16k" in engine or "gpt-3.5-turbo-1106" in engine
else 98500
if "claude-2-web" in engine or "claude-2" in engine
if "claude-2.1" in engine
else 3500
)
self.temperature: float = temperature
Expand Down Expand Up @@ -548,7 +550,7 @@ def truncate_conversation(
while True:
json_post = self.get_post_body(prompt, role, convo_id, model, pass_history, **kwargs)
url = config.bot_api_url.chat_url
if self.engine == "gpt-4-1106-preview" or "gpt-4-0125-preview" in self.engine or "gpt-4-turbo-preview" in self.engine or self.engine == "claude-2" or self.engine == "gpt-4-vision-preview":
if self.engine == "gpt-4-1106-preview" or "gpt-4-0125-preview" in self.engine or "gpt-4-turbo-preview" in self.engine or self.engine == "claude-2.1" or self.engine == "gpt-4-vision-preview":
message_token = {
"total": self.get_token_count(convo_id),
}
Expand Down Expand Up @@ -594,7 +596,7 @@ def get_token_count(self, convo_id: str = "default") -> int:
)
encoding = tiktoken.get_encoding("cl100k_base")
# tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
# tiktoken.model.MODEL_TO_ENCODING["claude-2"] = "cl100k_base"
# tiktoken.model.MODEL_TO_ENCODING["claude-2.1"] = "cl100k_base"
# encoding = tiktoken.encoding_for_model(self.engine)

num_tokens = 0
Expand Down Expand Up @@ -721,7 +723,7 @@ def ask_stream(
print(json.dumps(json_post, indent=4, ensure_ascii=False))
# print(self.conversation[convo_id])

if self.engine == "gpt-4-1106-preview" or self.engine == "gpt-4-vision-preview" or "gpt-4-0125-preview" in self.engine or "gpt-4-turbo-preview" in self.engine:
if self.engine == "gpt-4-1106-preview" or self.engine == "gpt-4-vision-preview" or "gpt-4-0125-preview" in self.engine or "gpt-4-turbo-preview" in self.engine or "claude" in self.engine:
model_max_tokens = kwargs.get("max_tokens", self.max_tokens)
elif self.engine == "gpt-3.5-turbo-1106":
model_max_tokens = min(kwargs.get("max_tokens", self.max_tokens), 16385 - message_token["total"])
Expand Down
2 changes: 1 addition & 1 deletion utils/gpt4free.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
"gpt-4-0125-preview": "gpt-4-turbo",
"gpt-4-vision-preview": "gpt-4",
"claude-2-web": "gpt-4",
"claude-2": "gpt-4",
"claude-2.1": "gpt-4",
}

def ask_stream(message, **kwargs):
Expand Down

0 comments on commit 318efb2

Please sign in to comment.