63 changes: 53 additions & 10 deletions camel/agents/chat_agent.py
@@ -240,11 +240,37 @@ def step(
if openai_new_api:
if not isinstance(response, ChatCompletion):
raise RuntimeError("OpenAI returned unexpected struct")
output_messages = [
ChatMessage(role_name=self.role_name, role_type=self.role_type,
meta_dict=dict(), **dict(choice.message))
for choice in response.choices
]
output_messages = []
for choice in response.choices:
msg = choice.message
# Only pass fields ChatMessage supports; ignore extras like annotations/reasoning
role = getattr(msg, "role", "assistant")
content = getattr(msg, "content", None)
if content is None:
content = ""
# Optional fields supported by ChatMessage
payload = {
"role": role,
"content": content,
}
# pass through refusal/audio if present
if getattr(msg, "refusal", None) is not None:
payload["refusal"] = msg.refusal
if getattr(msg, "audio", None) is not None:
payload["audio"] = msg.audio
# function_call and tool_calls if present and compatible
if getattr(msg, "function_call", None) is not None:
payload["function_call"] = msg.function_call
if getattr(msg, "tool_calls", None) is not None:
payload["tool_calls"] = msg.tool_calls
output_messages.append(
ChatMessage(
role_name=self.role_name,
role_type=self.role_type,
meta_dict=dict(),
**payload,
)
)
info = self.get_info(
response.id,
response.usage,
@@ -254,11 +280,28 @@ def step(
else:
if not isinstance(response, dict):
raise RuntimeError("OpenAI returned unexpected struct")
output_messages = [
ChatMessage(role_name=self.role_name, role_type=self.role_type,
meta_dict=dict(), **dict(choice["message"]))
for choice in response["choices"]
]
output_messages = []
for choice in response["choices"]:
msg = choice.get("message", {})
role = msg.get("role", "assistant")
content = msg.get("content") or ""
payload = {"role": role, "content": content}
if "refusal" in msg and msg["refusal"] is not None:
payload["refusal"] = msg["refusal"]
if "audio" in msg and msg["audio"] is not None:
payload["audio"] = msg["audio"]
if "function_call" in msg and msg["function_call"] is not None:
payload["function_call"] = msg["function_call"]
if "tool_calls" in msg and msg["tool_calls"] is not None:
payload["tool_calls"] = msg["tool_calls"]
output_messages.append(
ChatMessage(
role_name=self.role_name,
role_type=self.role_type,
meta_dict=dict(),
**payload,
)
)
info = self.get_info(
response["id"],
response["usage"],
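Note on the change above: the old list comprehension forwarded every field of choice.message into ChatMessage, so any new field the SDK attaches to responses (reasoning, annotations, ...) became an unexpected keyword argument. The loop now whitelists fields instead. The same pattern in isolation, as a minimal runnable sketch — the Message dataclass here is a hypothetical stand-in for camel's ChatMessage, not the real class:

from dataclasses import dataclass
from typing import Any, Optional

@dataclass
class Message:
    # Hypothetical stand-in for ChatMessage; field names are assumptions.
    role: str
    content: str
    refusal: Optional[str] = None
    tool_calls: Optional[Any] = None

def to_message(raw: Any) -> Message:
    # Copy only the fields we know about; drop anything the API adds later.
    return Message(
        role=getattr(raw, "role", "assistant"),
        content=getattr(raw, "content", None) or "",
        refusal=getattr(raw, "refusal", None),
        tool_calls=getattr(raw, "tool_calls", None),
    )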
122 changes: 96 additions & 26 deletions camel/model_backend.py
@@ -65,22 +65,30 @@ def __init__(self, model_type: ModelType, model_config_dict: Dict) -> None:

def run(self, *args, **kwargs):
string = "\n".join([message["content"] for message in kwargs["messages"]])
encoding = tiktoken.encoding_for_model(self.model_type.value)
# Be robust to unknown model names in tiktoken
try:
encoding = tiktoken.encoding_for_model(self.model_type.value)
except Exception:
encoding = tiktoken.get_encoding("cl100k_base")
num_prompt_tokens = len(encoding.encode(string))
gap_between_send_receive = 15 * len(kwargs["messages"])
num_prompt_tokens += gap_between_send_receive

if openai_new_api:
# Experimental, add base_url
if BASE_URL:
client = openai.OpenAI(
api_key=OPENAI_API_KEY,
base_url=BASE_URL,
)
else:
client = openai.OpenAI(
api_key=OPENAI_API_KEY
)
try:
if BASE_URL:
client = openai.OpenAI(
api_key=OPENAI_API_KEY,
base_url=BASE_URL,
)
else:
client = openai.OpenAI(
api_key=OPENAI_API_KEY
)
except Exception as e:
print(f"OpenAI client initialization failed: {e}")
raise RuntimeError(f"Failed to initialize OpenAI client: {e}") from e

num_max_token_map = {
"gpt-3.5-turbo": 4096,
@@ -91,15 +99,56 @@ def run(self, *args, **kwargs):
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-turbo": 100000,
"gpt-4o": 4096, #100000
"gpt-4o-mini": 16384, #100000
"gpt-4o": 100000,
"gpt-4o-mini": 100000,
}
num_max_token = num_max_token_map[self.model_type.value]
num_max_completion_tokens = num_max_token - num_prompt_tokens
self.model_config_dict['max_tokens'] = num_max_completion_tokens

response = client.chat.completions.create(*args, **kwargs, model=self.model_type.value,
**self.model_config_dict)
num_max_token = num_max_token_map.get(self.model_type.value, 100000)
# Compute a safe completion limit (applied only to non-gpt-5 models)
num_max_completion_tokens = max(1, num_max_token - max(0, num_prompt_tokens))
safe_cap = 2048
is_gpt5 = str(self.model_type.value).startswith("gpt-5")
if is_gpt5:
# For gpt-5, do NOT send any token parameter and avoid sending
# legacy fields from ChatGPTConfig (temperature, max_tokens, etc.).
send_config = {}
else:
token_param = "max_tokens"
send_config = dict(self.model_config_dict)
send_config.pop("max_completion_tokens", None)
send_config[token_param] = min(num_max_completion_tokens, safe_cap)
try:
log_visualize(
"System",
f"Using {token_param}={send_config[token_param]} for model {self.model_type.value}"
)
except Exception:
pass

# Extra parameters for GPT-5 style models
extra_body = None
create_kwargs = {}
if is_gpt5:
# response_format is a first-class arg, others go via extra_body
create_kwargs["response_format"] = {"type": "text"}
extra_body = {
"verbosity": "medium",
"reasoning_effort": "high",
}
try:
response = client.chat.completions.create(
*args,
**kwargs,
model=self.model_type.value,
**send_config,
**create_kwargs,
extra_body=extra_body,
)
except Exception as e:
log_visualize(
"System",
f"OpenAI chat.completions.create failed: {e}"
)
raise

cost = prompt_cost(
self.model_type.value,
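The gpt-5 branch above leans on the v1 Python SDK's extra_body escape hatch: fields the typed client does not model are merged into the request JSON. A minimal sketch of that call shape, assuming a configured client — the model name and parameter values are illustrative, copied from the branch above, not guaranteed server-side behavior:

import openai

client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment

response = client.chat.completions.create(
    model="gpt-5",
    messages=[{"role": "user", "content": "Say hi."}],
    response_format={"type": "text"},
    # Fields the typed SDK does not model are merged into the request body.
    extra_body={"verbosity": "medium", "reasoning_effort": "high"},
)
print(response.choices[0].message.content)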
@@ -124,15 +173,35 @@ def run(self, *args, **kwargs):
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-turbo": 100000,
"gpt-4o": 4096, #100000
"gpt-4o-mini": 16384, #100000
"gpt-4o": 100000,
"gpt-4o-mini": 100000,
}
num_max_token = num_max_token_map[self.model_type.value]
num_max_completion_tokens = num_max_token - num_prompt_tokens
self.model_config_dict['max_tokens'] = num_max_completion_tokens

response = openai.ChatCompletion.create(*args, **kwargs, model=self.model_type.value,
**self.model_config_dict)
num_max_token = num_max_token_map.get(self.model_type.value, 100000)
num_max_completion_tokens = max(1, num_max_token - max(0, num_prompt_tokens))
safe_cap = 2048
send_config = dict(self.model_config_dict)
send_config['max_tokens'] = min(num_max_completion_tokens, safe_cap)
try:
log_visualize(
"System",
f"Using max_tokens={send_config['max_tokens']} for model {self.model_type.value}"
)
except Exception:
pass

try:
response = openai.ChatCompletion.create(
*args,
**kwargs,
model=self.model_type.value,
**send_config,
)
except Exception as e:
log_visualize(
"System",
f"OpenAI ChatCompletion.create failed: {e}"
)
raise

cost = prompt_cost(
self.model_type.value,
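Both API branches now compute the completion budget the same way: tokenize the prompt (falling back to cl100k_base for names tiktoken does not know), subtract from the context window, and clamp to safe_cap. The arithmetic as a standalone sketch — the 100000 default and the 2048 cap mirror the values above:

import tiktoken

def completion_budget(model: str, prompt: str,
                      context_window: int = 100000,
                      safe_cap: int = 2048) -> int:
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        # tiktoken raises KeyError for unknown model names.
        enc = tiktoken.get_encoding("cl100k_base")
    prompt_tokens = len(enc.encode(prompt))
    return min(max(1, context_window - prompt_tokens), safe_cap)

print(completion_budget("gpt-5", "hello world"))  # -> 2048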
@@ -188,6 +257,7 @@ def create(model_type: ModelType, model_config_dict: Dict) -> ModelBackend:
ModelType.GPT_4_TURBO_V,
ModelType.GPT_4O,
ModelType.GPT_4O_MINI,
ModelType.GPT_5,
None
}:
model_class = OpenAIModel
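With ModelType.GPT_5 added to the whitelist, the factory resolves it to OpenAIModel like the other chat models. A usage sketch, assuming the enclosing factory class is named ModelFactory as in upstream CAMEL:

from camel.typing import ModelType
from camel.model_backend import ModelFactory

# Config keys follow ChatGPTConfig; the gpt-5 branch in run() ignores them.
backend = ModelFactory.create(ModelType.GPT_5, {"temperature": 0.2})
response = backend.run(messages=[{"role": "user", "content": "Hello"}])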
1 change: 1 addition & 0 deletions camel/typing.py
@@ -52,6 +52,7 @@ class ModelType(Enum):
GPT_4_TURBO_V = "gpt-4-turbo"
GPT_4O = "gpt-4o"
GPT_4O_MINI = "gpt-4o-mini"
GPT_5 = "gpt-5"

STUB = "stub"

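Since ModelType is a value-backed Enum, the new member round-trips from the wire string, which is what lets configuration name the model as plain "gpt-5":

from camel.typing import ModelType

assert ModelType("gpt-5") is ModelType.GPT_5
assert ModelType.GPT_5.value == "gpt-5"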
3 changes: 3 additions & 0 deletions camel/utils.py
@@ -91,6 +91,7 @@ def num_tokens_from_messages(
ModelType.GPT_4_TURBO_V,
ModelType.GPT_4O,
ModelType.GPT_4O_MINI,
ModelType.GPT_5,
ModelType.STUB
}:
return count_tokens_openai_chat_models(messages, encoding)
@@ -130,6 +131,8 @@ def get_model_token_limit(model: ModelType) -> int:
return 128000
elif model == ModelType.GPT_4O_MINI:
return 128000
elif model == ModelType.GPT_5:
return 128000
else:
raise ValueError("Unknown model type")

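get_model_token_limit still raises ValueError for unmapped members, so callers probing new models may want a guarded lookup; a small sketch (the 100000 fallback is an assumption mirroring model_backend.py, not part of this PR):

from camel.typing import ModelType
from camel.utils import get_model_token_limit

try:
    limit = get_model_token_limit(ModelType.GPT_5)  # 128000 after this change
except ValueError:
    limit = 100000  # assumed conservative default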
26 changes: 17 additions & 9 deletions camel/web_spider.py
@@ -9,15 +9,19 @@
self_api_key = os.environ.get('OPENAI_API_KEY')
BASE_URL = os.environ.get('BASE_URL')

if BASE_URL:
client = openai.OpenAI(
api_key=self_api_key,
base_url=BASE_URL,
)
else:
client = openai.OpenAI(
api_key=self_api_key
)
try:
if BASE_URL:
client = openai.OpenAI(
api_key=self_api_key,
base_url=BASE_URL,
)
else:
client = openai.OpenAI(
api_key=self_api_key
)
except Exception as e:
print(f"Warning: OpenAI client initialization failed: {e}")
client = None
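The try/except around client construction moves the failure from import time to call time: modules importing web_spider no longer crash when no API key is configured, and modal_trans (below) degrades to returning an empty string. The same init-or-None pattern factored into a helper — make_client is a name introduced here for illustration, not in the PR:

import os
from typing import Optional

import openai

def make_client() -> Optional[openai.OpenAI]:
    # Return None instead of raising so importing modules never crash.
    try:
        return openai.OpenAI(
            api_key=os.environ.get("OPENAI_API_KEY"),
            base_url=os.environ.get("BASE_URL") or None,
        )
    except Exception as exc:
        print(f"Warning: OpenAI client initialization failed: {exc}")
        return None

client = make_client()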

def get_baidu_baike_content(keyword):
# design api by the baidubaike
@@ -53,6 +57,10 @@ def get_wiki_content(keyword):


def modal_trans(task_dsp):
if client is None:
print("OpenAI client not available, skipping web spider")
return ''

try:
task_in ="'" + task_dsp + \
"'Just give me the most important keyword about this sentence without explaining it and your answer should be only one keyword."