Fall back to gpt-3.5-turbo when calculating tokens using a custom model
This commit is contained in:
parent
fac14a4244
commit
96a83fd824
1 changed files with 4 additions and 4 deletions
|
@ -122,13 +122,13 @@ class Chatbot:
|
||||||
"""
|
"""
|
||||||
Get token count
|
Get token count
|
||||||
"""
|
"""
|
||||||
|
_engine = self.engine
|
||||||
if self.engine not in ENGINES:
|
if self.engine not in ENGINES:
|
||||||
raise NotImplementedError(
|
# use gpt-3.5-turbo to calculate tokens
|
||||||
f"Engine {self.engine} is not supported. Select from {ENGINES}",
|
_engine = "gpt-3.5-turbo"
|
||||||
)
|
|
||||||
tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
|
tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
|
||||||
|
|
||||||
encoding = tiktoken.encoding_for_model(self.engine)
|
encoding = tiktoken.encoding_for_model(_engine)
|
||||||
|
|
||||||
num_tokens = 0
|
num_tokens = 0
|
||||||
for message in self.conversation[convo_id]:
|
for message in self.conversation[convo_id]:
|
||||||
|
|
Loading…
Reference in a new issue