Wendong-Fan committed on
Commit
38255bb
·
1 Parent(s): 5ac6182

update example code wd

Browse files
owl/camel/toolkits/search_toolkit.py CHANGED
@@ -705,7 +705,7 @@ class SearchToolkit(BaseToolkit):
705
  search_agent = ChatAgent(
706
  "You are a helpful search agent.",
707
  model=self.model,
708
- tools=[FunctionTool(self.search_wiki), FunctionTool(self.search_google), FunctionTool(self.search_archived_webpage)]
709
  )
710
 
711
  prompt = f"""
 
705
  search_agent = ChatAgent(
706
  "You are a helpful search agent.",
707
  model=self.model,
708
+ tools=[FunctionTool(self.search_duckduckgo),FunctionTool(self.search_wiki), FunctionTool(self.search_google), FunctionTool(self.search_archived_webpage)]
709
  )
710
 
711
  prompt = f"""
owl/camel/toolkits/web_toolkit.py CHANGED
@@ -107,13 +107,25 @@ def _get_bool(d: Any, k: str) -> bool:
107
 
108
  def _parse_json_output(text: str) -> Dict[str, Any]:
109
  # judge if text is markdown format (```json ````)
110
- if text.startswith("```json") and text.endswith("```"):
111
- text = text.replace("```json", "").replace("```", "").strip()
 
 
 
 
 
112
  try:
113
  return json.loads(text)
114
  except json.JSONDecodeError:
115
  logger.warning(f"Failed to parse JSON output: {text}")
116
- return {}
 
 
 
 
 
 
 
117
 
118
 
119
  def _reload_image(image: Image.Image):
 
107
 
108
  def _parse_json_output(text: str) -> Dict[str, Any]:
109
  # judge if text is markdown format (```json ````)
110
+ if "```json" in text and "```" in text:
111
+ # Extract content between ```json and the last ```
112
+ start_idx = text.find("```json") + len("```json")
113
+ end_idx = text.rfind("```")
114
+ if start_idx > -1 and end_idx > start_idx:
115
+ text = text[start_idx:end_idx].strip()
116
+
117
  try:
118
  return json.loads(text)
119
  except json.JSONDecodeError:
120
  logger.warning(f"Failed to parse JSON output: {text}")
121
+ # Try to clean the text further and attempt parsing again
122
+ try:
123
+ # Remove any extra whitespace or control characters
124
+ cleaned_text = text.strip()
125
+ return json.loads(cleaned_text)
126
+ except json.JSONDecodeError:
127
+ logger.error(f"Failed to parse JSON even after cleaning: {text}")
128
+ return {}
129
 
130
 
131
  def _reload_image(image: Image.Image):
owl/run.py CHANGED
@@ -3,89 +3,123 @@ load_dotenv()
3
 
4
  from camel.models import ModelFactory
5
  from camel.toolkits import (
6
- WebToolkit,
7
- DocumentProcessingToolkit,
8
- VideoAnalysisToolkit,
9
- AudioAnalysisToolkit,
10
- CodeExecutionToolkit,
11
- ImageAnalysisToolkit,
12
  SearchToolkit,
13
- ExcelToolkit
14
- )
 
15
  from camel.types import ModelPlatformType, ModelType
16
- # from camel.configs import ChatGPTConfig
17
-
18
-
19
- from loguru import logger
20
 
21
  from utils import OwlRolePlaying, run_society
22
 
23
 
24
-
25
  def construct_society(question: str) -> OwlRolePlaying:
26
- r"""Construct the society based on the question."""
27
-
28
- user_role_name = "user"
29
- assistant_role_name = "assistant"
30
 
31
- user_model = ModelFactory.create(
32
- model_platform=ModelPlatformType.OPENAI,
33
- model_type=ModelType.GPT_4O,
34
- # model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(), # [Optional] the config for model
35
- )
36
-
37
- assistant_model = ModelFactory.create(
38
- model_platform=ModelPlatformType.OPENAI,
39
- model_type=ModelType.GPT_4O,
40
- # model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(), # [Optional] the config for model
41
- )
42
-
43
- tools_list = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  *WebToolkit(
45
- headless=False, # Set to True if you want to run in headless mode (e.g. on a remote server)
46
- web_agent_model=assistant_model,
47
- planning_agent_model=assistant_model
48
  ).get_tools(),
49
  *DocumentProcessingToolkit().get_tools(),
50
- *VideoAnalysisToolkit(model=assistant_model).get_tools(), # This requires OpenAI Key
51
- *AudioAnalysisToolkit().get_tools(), # This requires OpenAI Key
52
  *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
53
- *ImageAnalysisToolkit(model=assistant_model).get_tools(),
54
- *SearchToolkit(model=assistant_model).get_tools(),
55
- *ExcelToolkit().get_tools()
56
  ]
57
-
58
- user_role_name = 'user'
59
- user_agent_kwargs = dict(model=user_model)
60
- assistant_role_name = 'assistant'
61
- assistant_agent_kwargs = dict(model=assistant_model,
62
- tools=tools_list)
63
 
 
 
 
 
 
64
  task_kwargs = {
65
- 'task_prompt': question,
66
- 'with_task_specify': False,
67
  }
68
-
 
69
  society = OwlRolePlaying(
70
  **task_kwargs,
71
- user_role_name=user_role_name,
72
  user_agent_kwargs=user_agent_kwargs,
73
- assistant_role_name=assistant_role_name,
74
  assistant_agent_kwargs=assistant_agent_kwargs,
75
  )
76
 
77
  return society
78
 
79
 
80
- # Example case
81
- question = "What was the volume in m^3 of the fish bag that was calculated in the University of Leicester paper `Can Hiccup Supply Enough Fish to Maintain a Dragon’s Diet?` "
82
-
83
- society = construct_society(question)
84
- answer, chat_history, token_count = run_society(society)
85
-
86
- logger.success(f"Answer: {answer}")
87
-
88
-
89
-
 
 
 
 
 
90
 
91
 
 
 
 
3
 
4
  from camel.models import ModelFactory
5
  from camel.toolkits import (
6
+ AudioAnalysisToolkit,
7
+ CodeExecutionToolkit,
8
+ DocumentProcessingToolkit,
9
+ ExcelToolkit,
10
+ ImageAnalysisToolkit,
 
11
  SearchToolkit,
12
+ VideoAnalysisToolkit,
13
+ WebToolkit,
14
+ )
15
  from camel.types import ModelPlatformType, ModelType
 
 
 
 
16
 
17
  from utils import OwlRolePlaying, run_society
18
 
19
 
 
20
  def construct_society(question: str) -> OwlRolePlaying:
21
+ r"""Construct a society of agents based on the given question.
 
 
 
22
 
23
+ Args:
24
+ question (str): The task or question to be addressed by the society.
25
+
26
+ Returns:
27
+ OwlRolePlaying: A configured society of agents ready to address the question.
28
+ """
29
+
30
+ # Create models for different components
31
+ models = {
32
+ "user": ModelFactory.create(
33
+ model_platform=ModelPlatformType.OPENAI,
34
+ model_type=ModelType.GPT_4O,
35
+ model_config_dict={"temperature": 0},
36
+ ),
37
+ "assistant": ModelFactory.create(
38
+ model_platform=ModelPlatformType.OPENAI,
39
+ model_type=ModelType.GPT_4O,
40
+ model_config_dict={"temperature": 0},
41
+ ),
42
+ "web": ModelFactory.create(
43
+ model_platform=ModelPlatformType.OPENAI,
44
+ model_type=ModelType.GPT_4O,
45
+ model_config_dict={"temperature": 0},
46
+ ),
47
+ "planning": ModelFactory.create(
48
+ model_platform=ModelPlatformType.OPENAI,
49
+ model_type=ModelType.GPT_4O,
50
+ model_config_dict={"temperature": 0},
51
+ ),
52
+ "video": ModelFactory.create(
53
+ model_platform=ModelPlatformType.OPENAI,
54
+ model_type=ModelType.GPT_4O,
55
+ model_config_dict={"temperature": 0},
56
+ ),
57
+ "image": ModelFactory.create(
58
+ model_platform=ModelPlatformType.OPENAI,
59
+ model_type=ModelType.GPT_4O,
60
+ model_config_dict={"temperature": 0},
61
+ ),
62
+ "search": ModelFactory.create(
63
+ model_platform=ModelPlatformType.OPENAI,
64
+ model_type=ModelType.GPT_4O,
65
+ model_config_dict={"temperature": 0},
66
+ ),
67
+ }
68
+
69
+ # Configure toolkits
70
+ tools = [
71
  *WebToolkit(
72
+ headless=False, # Set to True for headless mode (e.g., on remote servers)
73
+ web_agent_model=models["web"],
74
+ planning_agent_model=models["planning"],
75
  ).get_tools(),
76
  *DocumentProcessingToolkit().get_tools(),
77
+ *VideoAnalysisToolkit(model=models["video"]).get_tools(), # This requires OpenAI Key
78
+ *AudioAnalysisToolkit().get_tools(), # This requires OpenAI Key
79
  *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
80
+ *ImageAnalysisToolkit(model=models["image"]).get_tools(),
81
+ *SearchToolkit(model=models["search"]).get_tools(),
82
+ *ExcelToolkit().get_tools(),
83
  ]
 
 
 
 
 
 
84
 
85
+ # Configure agent roles and parameters
86
+ user_agent_kwargs = {"model": models["user"]}
87
+ assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
88
+
89
+ # Configure task parameters
90
  task_kwargs = {
91
+ "task_prompt": question,
92
+ "with_task_specify": False,
93
  }
94
+
95
+ # Create and return the society
96
  society = OwlRolePlaying(
97
  **task_kwargs,
98
+ user_role_name="user",
99
  user_agent_kwargs=user_agent_kwargs,
100
+ assistant_role_name="assistant",
101
  assistant_agent_kwargs=assistant_agent_kwargs,
102
  )
103
 
104
  return society
105
 
106
 
107
+ def main():
108
+ r"""Main function to run the OWL system with an example question."""
109
+ # Example research question
110
+ question = (
111
+ "What was the volume in m^3 of the fish bag that was calculated in "
112
+ "the University of Leicester paper `Can Hiccup Supply Enough Fish "
113
+ "to Maintain a Dragon's Diet?`"
114
+ )
115
+
116
+ # Construct and run the society
117
+ society = construct_society(question)
118
+ answer, chat_history, token_count = run_society(society)
119
+
120
+ # Output the result
121
+ print(f"Answer: {answer}")
122
 
123
 
124
+ if __name__ == "__main__":
125
+ main()
owl/run_deepseek.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dotenv import load_dotenv
2
+ load_dotenv()
3
+
4
+ from camel.models import ModelFactory
5
+ from camel.toolkits import (
6
+ CodeExecutionToolkit,
7
+ DocumentProcessingToolkit,
8
+ ExcelToolkit,
9
+ ImageAnalysisToolkit,
10
+ SearchToolkit,
11
+ WebToolkit,
12
+ )
13
+ from camel.types import ModelPlatformType, ModelType
14
+
15
+ from utils import OwlRolePlaying, run_society
16
+
17
+
18
+ def construct_society(question: str) -> OwlRolePlaying:
19
+ r"""Construct a society of agents based on the given question.
20
+
21
+ Args:
22
+ question (str): The task or question to be addressed by the society.
23
+
24
+ Returns:
25
+ OwlRolePlaying: A configured society of agents ready to address the question.
26
+ """
27
+
28
+ # Create models for different components
29
+ models = {
30
+ "user": ModelFactory.create(
31
+ model_platform=ModelPlatformType.DEEPSEEK,
32
+ model_type=ModelType.DEEPSEEK_CHAT,
33
+ model_config_dict={"temperature": 0},
34
+ ),
35
+ "assistant": ModelFactory.create(
36
+ model_platform=ModelPlatformType.DEEPSEEK,
37
+ model_type=ModelType.DEEPSEEK_CHAT,
38
+ model_config_dict={"temperature": 0},
39
+ ),
40
+ "web": ModelFactory.create(
41
+ model_platform=ModelPlatformType.DEEPSEEK,
42
+ model_type=ModelType.DEEPSEEK_CHAT,
43
+ model_config_dict={"temperature": 0},
44
+ ),
45
+ "planning": ModelFactory.create(
46
+ model_platform=ModelPlatformType.DEEPSEEK,
47
+ model_type=ModelType.DEEPSEEK_CHAT,
48
+ model_config_dict={"temperature": 0},
49
+ ),
50
+ "video": ModelFactory.create(
51
+ model_platform=ModelPlatformType.DEEPSEEK,
52
+ model_type=ModelType.DEEPSEEK_CHAT,
53
+ model_config_dict={"temperature": 0},
54
+ ),
55
+ "image": ModelFactory.create(
56
+ model_platform=ModelPlatformType.DEEPSEEK,
57
+ model_type=ModelType.DEEPSEEK_CHAT,
58
+ model_config_dict={"temperature": 0},
59
+ ),
60
+ "search": ModelFactory.create(
61
+ model_platform=ModelPlatformType.DEEPSEEK,
62
+ model_type=ModelType.DEEPSEEK_CHAT,
63
+ model_config_dict={"temperature": 0},
64
+ ),
65
+ }
66
+
67
+ # Configure toolkits
68
+ tools = [
69
+ *WebToolkit(
70
+ headless=False, # Set to True for headless mode (e.g., on remote servers)
71
+ web_agent_model=models["web"],
72
+ planning_agent_model=models["planning"],
73
+ ).get_tools(),
74
+ *DocumentProcessingToolkit().get_tools(),
75
+ *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
76
+ *ImageAnalysisToolkit(model=models["image"]).get_tools(),
77
+ *SearchToolkit(model=models["search"]).get_tools(),
78
+ *ExcelToolkit().get_tools(),
79
+ ]
80
+
81
+ # Configure agent roles and parameters
82
+ user_agent_kwargs = {"model": models["user"]}
83
+ assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
84
+
85
+ # Configure task parameters
86
+ task_kwargs = {
87
+ "task_prompt": question,
88
+ "with_task_specify": False,
89
+ }
90
+
91
+ # Create and return the society
92
+ society = OwlRolePlaying(
93
+ **task_kwargs,
94
+ user_role_name="user",
95
+ user_agent_kwargs=user_agent_kwargs,
96
+ assistant_role_name="assistant",
97
+ assistant_agent_kwargs=assistant_agent_kwargs,
98
+ )
99
+
100
+ return society
101
+
102
+
103
+ def main():
104
+ r"""Main function to run the OWL system with an example question."""
105
+ # Example research question
106
+ question = (
107
+ "What was the volume in m^3 of the fish bag that was calculated in "
108
+ "the University of Leicester paper `Can Hiccup Supply Enough Fish "
109
+ "to Maintain a Dragon's Diet?`"
110
+ )
111
+
112
+ # Construct and run the society
113
+ society = construct_society(question)
114
+ answer, chat_history, token_count = run_society(society)
115
+
116
+ # Output the result
117
+ print(f"Answer: {answer}")
118
+
119
+
120
+ if __name__ == "__main__":
121
+ main()
owl/run_deepseek_example.py DELETED
@@ -1,81 +0,0 @@
1
- from camel.models import ModelFactory
2
- from camel.toolkits import *
3
- from camel.types import ModelPlatformType, ModelType
4
- from camel.configs import DeepSeekConfig
5
-
6
- from dotenv import load_dotenv
7
- from loguru import logger
8
-
9
- from utils import OwlRolePlaying, run_society
10
-
11
-
12
- load_dotenv()
13
-
14
-
15
- def construct_society(question: str) -> OwlRolePlaying:
16
- r"""Construct the society based on the question."""
17
-
18
- user_role_name = "user"
19
- assistant_role_name = "assistant"
20
-
21
- user_model = ModelFactory.create(
22
- model_platform=ModelPlatformType.DEEPSEEK,
23
- model_type=ModelType.DEEPSEEK_CHAT,
24
- model_config_dict=DeepSeekConfig(temperature=0, top_p=1).as_dict(), # [Optional] the config for model
25
- )
26
-
27
- assistant_model = ModelFactory.create(
28
- model_platform=ModelPlatformType.DEEPSEEK,
29
- model_type=ModelType.DEEPSEEK_CHAT,
30
- model_config_dict=DeepSeekConfig(temperature=0, top_p=1).as_dict(), # [Optional] the config for model
31
- )
32
-
33
- tools_list = [
34
- *WebToolkit(
35
- headless=False, # Set to True if you want to run in headless mode (e.g. on a remote server)
36
- web_agent_model=assistant_model,
37
- planning_agent_model=assistant_model
38
- ).get_tools(),
39
- *DocumentProcessingToolkit().get_tools(),
40
- # *VideoAnalysisToolkit().get_tools(), # This requires OpenAI and Qwen Key
41
- *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
42
- *ImageAnalysisToolkit(model=assistant_model).get_tools(),
43
- # *AudioAnalysisToolkit().get_tools(), # This requires OpenAI Key
44
- *SearchToolkit(model=assistant_model).get_tools(),
45
- *ExcelToolkit().get_tools()
46
- ]
47
-
48
- user_role_name = 'user'
49
- user_agent_kwargs = dict(model=user_model)
50
- assistant_role_name = 'assistant'
51
- assistant_agent_kwargs = dict(model=assistant_model,
52
- tools=tools_list)
53
-
54
- task_kwargs = {
55
- 'task_prompt': question,
56
- 'with_task_specify': False,
57
- }
58
-
59
- society = OwlRolePlaying(
60
- **task_kwargs,
61
- user_role_name=user_role_name,
62
- user_agent_kwargs=user_agent_kwargs,
63
- assistant_role_name=assistant_role_name,
64
- assistant_agent_kwargs=assistant_agent_kwargs,
65
- )
66
-
67
- return society
68
-
69
-
70
- # Example case
71
- question = "What was the volume in m^3 of the fish bag that was calculated in the University of Leicester paper `Can Hiccup Supply Enough Fish to Maintain a Dragon’s Diet?` "
72
-
73
- society = construct_society(question)
74
- answer, chat_history, token_count = run_society(society)
75
-
76
- logger.success(f"Answer: {answer}")
77
-
78
-
79
-
80
-
81
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
owl/run_gaia_roleplaying.py CHANGED
@@ -1,80 +1,120 @@
1
- from camel.models import ModelFactory
2
- from camel.toolkits import *
3
- from camel.types import ModelPlatformType, ModelType
4
- from camel.configs import ChatGPTConfig
5
- from utils import GAIABenchmark
6
-
7
  from dotenv import load_dotenv
8
- from loguru import logger
9
 
10
  import os
 
11
 
12
- load_dotenv()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
 
 
15
  LEVEL = 1
16
  SAVE_RESULT = True
17
  test_idx = [0]
18
 
19
 
20
  def main():
21
-
 
22
  cache_dir = "tmp/"
23
  os.makedirs(cache_dir, exist_ok=True)
24
 
25
- user_model = ModelFactory.create(
26
- model_platform=ModelPlatformType.OPENAI,
27
- model_type=ModelType.GPT_4O,
28
- model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(), # [Optional] the config for model
29
- )
30
-
31
- assistant_model = ModelFactory.create(
32
- model_platform=ModelPlatformType.OPENAI,
33
- model_type=ModelType.GPT_4O,
34
- model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(), # [Optional] the config for model
35
- )
36
-
37
- tools_list = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  *WebToolkit(
39
- headless=False, # Set to True if you want to run in headless mode (e.g. on a remote server)
40
- web_agent_model=assistant_model,
41
- planning_agent_model=assistant_model
42
  ).get_tools(),
43
  *DocumentProcessingToolkit().get_tools(),
44
- *VideoAnalysisToolkit(model=assistant_model).get_tools(), # This requires OpenAI Key
45
  *AudioAnalysisToolkit().get_tools(), # This requires OpenAI Key
46
  *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
47
- *ImageAnalysisToolkit(model=assistant_model).get_tools(),
48
- *SearchToolkit(model=assistant_model).get_tools(),
49
- *ExcelToolkit().get_tools()
50
  ]
 
 
 
 
51
 
52
- user_role_name = 'user'
53
- user_agent_kwargs = dict(model=user_model)
54
- assistant_role_name = 'assistant'
55
- assistant_agent_kwargs = dict(model=assistant_model,
56
- tools=tools_list)
57
-
58
  benchmark = GAIABenchmark(
59
  data_dir="data/gaia",
60
  save_to=f"results/result.json"
61
  )
62
 
 
63
  print(f"Number of validation examples: {len(benchmark.valid)}")
64
  print(f"Number of test examples: {len(benchmark.test)}")
65
 
 
66
  result = benchmark.run(
67
  on="valid",
68
  level=LEVEL,
69
  idx=test_idx,
70
  save_result=SAVE_RESULT,
71
-
72
- user_role_name=user_role_name,
73
  user_agent_kwargs=user_agent_kwargs,
74
- assistant_role_name=assistant_role_name,
75
  assistant_agent_kwargs=assistant_agent_kwargs,
76
- )
77
 
 
78
  logger.success(f"Correct: {result['correct']}, Total: {result['total']}")
79
  logger.success(f"Accuracy: {result['accuracy']}")
80
 
 
 
 
 
 
 
 
1
  from dotenv import load_dotenv
2
+ load_dotenv()
3
 
4
  import os
5
+ from loguru import logger
6
 
7
+ from camel.models import ModelFactory
8
+ from camel.toolkits import (
9
+ AudioAnalysisToolkit,
10
+ CodeExecutionToolkit,
11
+ DocumentProcessingToolkit,
12
+ ExcelToolkit,
13
+ ImageAnalysisToolkit,
14
+ SearchToolkit,
15
+ VideoAnalysisToolkit,
16
+ WebToolkit,
17
+ )
18
+ from camel.types import ModelPlatformType, ModelType
19
+ from camel.configs import ChatGPTConfig
20
+
21
+ from utils import GAIABenchmark
22
 
23
 
24
+ # Configuration
25
  LEVEL = 1
26
  SAVE_RESULT = True
27
  test_idx = [0]
28
 
29
 
30
  def main():
31
+ """Main function to run the GAIA benchmark."""
32
+ # Create cache directory
33
  cache_dir = "tmp/"
34
  os.makedirs(cache_dir, exist_ok=True)
35
 
36
+ # Create models for different components
37
+ models = {
38
+ "user": ModelFactory.create(
39
+ model_platform=ModelPlatformType.OPENAI,
40
+ model_type=ModelType.GPT_4O,
41
+ model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
42
+ ),
43
+ "assistant": ModelFactory.create(
44
+ model_platform=ModelPlatformType.OPENAI,
45
+ model_type=ModelType.GPT_4O,
46
+ model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
47
+ ),
48
+ "web": ModelFactory.create(
49
+ model_platform=ModelPlatformType.OPENAI,
50
+ model_type=ModelType.GPT_4O,
51
+ model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
52
+ ),
53
+ "planning": ModelFactory.create(
54
+ model_platform=ModelPlatformType.OPENAI,
55
+ model_type=ModelType.GPT_4O,
56
+ model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
57
+ ),
58
+ "video": ModelFactory.create(
59
+ model_platform=ModelPlatformType.OPENAI,
60
+ model_type=ModelType.GPT_4O,
61
+ model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
62
+ ),
63
+ "image": ModelFactory.create(
64
+ model_platform=ModelPlatformType.OPENAI,
65
+ model_type=ModelType.GPT_4O,
66
+ model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
67
+ ),
68
+ "search": ModelFactory.create(
69
+ model_platform=ModelPlatformType.OPENAI,
70
+ model_type=ModelType.GPT_4O,
71
+ model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
72
+ ),
73
+ }
74
+
75
+ # Configure toolkits
76
+ tools = [
77
  *WebToolkit(
78
+ headless=False, # Set to True for headless mode (e.g., on remote servers)
79
+ web_agent_model=models["web"],
80
+ planning_agent_model=models["planning"],
81
  ).get_tools(),
82
  *DocumentProcessingToolkit().get_tools(),
83
+ *VideoAnalysisToolkit(model=models["video"]).get_tools(), # This requires OpenAI Key
84
  *AudioAnalysisToolkit().get_tools(), # This requires OpenAI Key
85
  *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
86
+ *ImageAnalysisToolkit(model=models["image"]).get_tools(),
87
+ *SearchToolkit(model=models["search"]).get_tools(),
88
+ *ExcelToolkit().get_tools(),
89
  ]
90
+
91
+ # Configure agent roles and parameters
92
+ user_agent_kwargs = {"model": models["user"]}
93
+ assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
94
 
95
+ # Initialize benchmark
 
 
 
 
 
96
  benchmark = GAIABenchmark(
97
  data_dir="data/gaia",
98
  save_to=f"results/result.json"
99
  )
100
 
101
+ # Print benchmark information
102
  print(f"Number of validation examples: {len(benchmark.valid)}")
103
  print(f"Number of test examples: {len(benchmark.test)}")
104
 
105
+ # Run benchmark
106
  result = benchmark.run(
107
  on="valid",
108
  level=LEVEL,
109
  idx=test_idx,
110
  save_result=SAVE_RESULT,
111
+ user_role_name="user",
 
112
  user_agent_kwargs=user_agent_kwargs,
113
+ assistant_role_name="assistant",
114
  assistant_agent_kwargs=assistant_agent_kwargs,
115
+ )
116
 
117
+ # Output results
118
  logger.success(f"Correct: {result['correct']}, Total: {result['total']}")
119
  logger.success(f"Accuracy: {result['accuracy']}")
120
 
owl/run_mini.py CHANGED
@@ -3,75 +3,101 @@ load_dotenv()
3
 
4
  from camel.models import ModelFactory
5
  from camel.toolkits import (
6
- WebToolkit,
7
  SearchToolkit,
8
- FunctionTool
9
- )
10
  from camel.types import ModelPlatformType, ModelType
11
 
12
-
13
- from loguru import logger
14
-
15
  from utils import OwlRolePlaying, run_society
16
 
17
 
18
-
19
  def construct_society(question: str) -> OwlRolePlaying:
20
- r"""Construct the society based on the question."""
21
-
22
- user_role_name = "user"
23
- assistant_role_name = "assistant"
24
 
25
- user_model = ModelFactory.create(
26
- model_platform=ModelPlatformType.OPENAI,
27
- model_type=ModelType.GPT_4O,
28
- )
29
-
30
- assistant_model = ModelFactory.create(
31
- model_platform=ModelPlatformType.OPENAI,
32
- model_type=ModelType.GPT_4O,
33
- )
34
-
35
- tools_list = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  *WebToolkit(
37
- headless=False,
38
- web_agent_model=assistant_model,
39
- planning_agent_model=assistant_model
40
  ).get_tools(),
41
- FunctionTool(SearchToolkit(model=assistant_model).search_duckduckgo),
42
  ]
43
-
44
- user_role_name = 'user'
45
- user_agent_kwargs = dict(model=user_model)
46
- assistant_role_name = 'assistant'
47
- assistant_agent_kwargs = dict(model=assistant_model,
48
- tools=tools_list)
49
 
 
 
 
 
 
50
  task_kwargs = {
51
- 'task_prompt': question,
52
- 'with_task_specify': False,
53
  }
54
-
 
55
  society = OwlRolePlaying(
56
  **task_kwargs,
57
- user_role_name=user_role_name,
58
  user_agent_kwargs=user_agent_kwargs,
59
- assistant_role_name=assistant_role_name,
60
  assistant_agent_kwargs=assistant_agent_kwargs,
61
  )
62
 
63
  return society
64
 
65
 
66
- # Example case
67
- question = "What was the volume in m^3 of the fish bag that was calculated in the University of Leicester paper `Can Hiccup Supply Enough Fish to Maintain a Dragon’s Diet?` "
68
-
69
- society = construct_society(question)
70
- answer, chat_history, token_count = run_society(society)
71
-
72
- logger.success(f"Answer: {answer}")
73
-
74
-
75
-
 
 
 
 
 
76
 
77
 
 
 
 
3
 
4
  from camel.models import ModelFactory
5
  from camel.toolkits import (
 
6
  SearchToolkit,
7
+ WebToolkit,
8
+ )
9
  from camel.types import ModelPlatformType, ModelType
10
 
 
 
 
11
  from utils import OwlRolePlaying, run_society
12
 
13
 
 
14
  def construct_society(question: str) -> OwlRolePlaying:
15
+ r"""Construct a society of agents based on the given question.
 
 
 
16
 
17
+ Args:
18
+ question (str): The task or question to be addressed by the society.
19
+
20
+ Returns:
21
+ OwlRolePlaying: A configured society of agents ready to address the question.
22
+ """
23
+
24
+ # Create models for different components
25
+ models = {
26
+ "user": ModelFactory.create(
27
+ model_platform=ModelPlatformType.OPENAI,
28
+ model_type=ModelType.GPT_4O,
29
+ model_config_dict={"temperature": 0},
30
+ ),
31
+ "assistant": ModelFactory.create(
32
+ model_platform=ModelPlatformType.OPENAI,
33
+ model_type=ModelType.GPT_4O,
34
+ model_config_dict={"temperature": 0},
35
+ ),
36
+ "web": ModelFactory.create(
37
+ model_platform=ModelPlatformType.OPENAI,
38
+ model_type=ModelType.GPT_4O,
39
+ model_config_dict={"temperature": 0},
40
+ ),
41
+ "planning": ModelFactory.create(
42
+ model_platform=ModelPlatformType.OPENAI,
43
+ model_type=ModelType.GPT_4O,
44
+ model_config_dict={"temperature": 0},
45
+ ),
46
+ "search": ModelFactory.create(
47
+ model_platform=ModelPlatformType.OPENAI,
48
+ model_type=ModelType.GPT_4O,
49
+ model_config_dict={"temperature": 0},
50
+ ),
51
+ }
52
+
53
+ # Configure toolkits
54
+ tools = [
55
  *WebToolkit(
56
+ headless=False, # Set to True for headless mode (e.g., on remote servers)
57
+ web_agent_model=models["web"],
58
+ planning_agent_model=models["planning"],
59
  ).get_tools(),
60
+ *SearchToolkit(model=models["search"]).get_tools(),
61
  ]
 
 
 
 
 
 
62
 
63
+ # Configure agent roles and parameters
64
+ user_agent_kwargs = {"model": models["user"]}
65
+ assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
66
+
67
+ # Configure task parameters
68
  task_kwargs = {
69
+ "task_prompt": question,
70
+ "with_task_specify": False,
71
  }
72
+
73
+ # Create and return the society
74
  society = OwlRolePlaying(
75
  **task_kwargs,
76
+ user_role_name="user",
77
  user_agent_kwargs=user_agent_kwargs,
78
+ assistant_role_name="assistant",
79
  assistant_agent_kwargs=assistant_agent_kwargs,
80
  )
81
 
82
  return society
83
 
84
 
85
+ def main():
86
+ r"""Main function to run the OWL system with an example question."""
87
+ # Example research question
88
+ question = (
89
+ "What was the volume in m^3 of the fish bag that was calculated in "
90
+ "the University of Leicester paper `Can Hiccup Supply Enough Fish "
91
+ "to Maintain a Dragon's Diet?`"
92
+ )
93
+
94
+ # Construct and run the society
95
+ society = construct_society(question)
96
+ answer, chat_history, token_count = run_society(society)
97
+
98
+ # Output the result
99
+ print(f"Answer: {answer}")
100
 
101
 
102
+ if __name__ == "__main__":
103
+ main()
owl/run_openai_compatiable_model.py CHANGED
@@ -1,92 +1,129 @@
1
-
2
  from dotenv import load_dotenv
3
  load_dotenv()
4
 
 
5
  from camel.models import ModelFactory
6
- from camel.toolkits import *
 
 
 
 
 
 
 
7
  from camel.types import ModelPlatformType, ModelType
8
- from camel.configs import ChatGPTConfig
9
-
10
- from typing import List, Dict
11
-
12
- from retry import retry
13
- from loguru import logger
14
 
15
  from utils import OwlRolePlaying, run_society
16
- import os
17
-
18
-
19
 
20
 
21
  def construct_society(question: str) -> OwlRolePlaying:
22
- r"""Construct the society based on the question."""
23
-
24
- user_role_name = "user"
25
- assistant_role_name = "assistant"
26
 
27
- user_model = ModelFactory.create(
28
- model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
29
- model_type="qwen-max",
30
- api_key=os.getenv("QWEN_API_KEY"),
31
- url="https://dashscope.aliyuncs.com/compatible-mode/v1",
32
- model_config_dict={"temperature": 0.4, "max_tokens": 4096},
33
- )
34
-
35
- assistant_model = ModelFactory.create(
36
- model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
37
- model_type="qwen-max",
38
- api_key=os.getenv("QWEN_API_KEY"),
39
- url="https://dashscope.aliyuncs.com/compatible-mode/v1",
40
- model_config_dict={"temperature": 0.4, "max_tokens": 4096},
41
- )
42
-
43
-
44
- tools_list = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  *WebToolkit(
46
- headless=False, # Set to True if you want to run in headless mode (e.g. on a remote server)
47
- web_agent_model=assistant_model,
48
- planning_agent_model=assistant_model
49
  ).get_tools(),
50
  *DocumentProcessingToolkit().get_tools(),
51
- # *VideoAnalysisToolkit(model=assistant_model).get_tools(), # This requires OpenAI Key
52
- # *AudioAnalysisToolkit().get_tools(), # This requires OpenAI Key
53
- *CodeExecutionToolkit().get_tools(),
54
- *ImageAnalysisToolkit(model=assistant_model).get_tools(),
55
- *SearchToolkit(model=assistant_model).get_tools(),
56
- *ExcelToolkit().get_tools()
57
  ]
58
-
59
- user_role_name = 'user'
60
- user_agent_kwargs = dict(model=user_model)
61
- assistant_role_name = 'assistant'
62
- assistant_agent_kwargs = dict(model=assistant_model,
63
- tools=tools_list)
64
 
 
 
 
 
 
65
  task_kwargs = {
66
- 'task_prompt': question,
67
- 'with_task_specify': False,
68
  }
69
-
 
70
  society = OwlRolePlaying(
71
  **task_kwargs,
72
- user_role_name=user_role_name,
73
  user_agent_kwargs=user_agent_kwargs,
74
- assistant_role_name=assistant_role_name,
75
  assistant_agent_kwargs=assistant_agent_kwargs,
76
  )
77
 
78
  return society
79
 
80
 
81
- # Example case
82
- question = "what is the weather in beijing today?"
83
-
84
- society = construct_society(question)
85
- answer, chat_history, token_count = run_society(society)
86
-
87
- logger.success(f"Answer: {answer}")
88
-
89
-
90
-
 
 
 
 
 
91
 
92
 
 
 
 
 
1
  from dotenv import load_dotenv
2
  load_dotenv()
3
 
4
+ import os
5
  from camel.models import ModelFactory
6
+ from camel.toolkits import (
7
+ CodeExecutionToolkit,
8
+ DocumentProcessingToolkit,
9
+ ExcelToolkit,
10
+ ImageAnalysisToolkit,
11
+ SearchToolkit,
12
+ WebToolkit,
13
+ )
14
  from camel.types import ModelPlatformType, ModelType
 
 
 
 
 
 
15
 
16
  from utils import OwlRolePlaying, run_society
 
 
 
17
 
18
 
19
  def construct_society(question: str) -> OwlRolePlaying:
20
+ r"""Construct a society of agents based on the given question.
 
 
 
21
 
22
+ Args:
23
+ question (str): The task or question to be addressed by the society.
24
+
25
+ Returns:
26
+ OwlRolePlaying: A configured society of agents ready to address the question.
27
+ """
28
+
29
+ # Create models for different components
30
+ models = {
31
+ "user": ModelFactory.create(
32
+ model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
33
+ model_type="qwen-max",
34
+ api_key=os.getenv("QWEN_API_KEY"),
35
+ url="https://dashscope.aliyuncs.com/compatible-mode/v1",
36
+ model_config_dict={"temperature": 0.4, "max_tokens": 4096},
37
+ ),
38
+ "assistant": ModelFactory.create(
39
+ model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
40
+ model_type="qwen-max",
41
+ api_key=os.getenv("QWEN_API_KEY"),
42
+ url="https://dashscope.aliyuncs.com/compatible-mode/v1",
43
+ model_config_dict={"temperature": 0.4, "max_tokens": 4096},
44
+ ),
45
+ "web": ModelFactory.create(
46
+ model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
47
+ model_type="qwen-max",
48
+ api_key=os.getenv("QWEN_API_KEY"),
49
+ url="https://dashscope.aliyuncs.com/compatible-mode/v1",
50
+ model_config_dict={"temperature": 0.4, "max_tokens": 4096},
51
+ ),
52
+ "planning": ModelFactory.create(
53
+ model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
54
+ model_type="qwen-max",
55
+ api_key=os.getenv("QWEN_API_KEY"),
56
+ url="https://dashscope.aliyuncs.com/compatible-mode/v1",
57
+ model_config_dict={"temperature": 0.4, "max_tokens": 4096},
58
+ ),
59
+ "image": ModelFactory.create(
60
+ model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
61
+ model_type="qwen-max",
62
+ api_key=os.getenv("QWEN_API_KEY"),
63
+ url="https://dashscope.aliyuncs.com/compatible-mode/v1",
64
+ model_config_dict={"temperature": 0.4, "max_tokens": 4096},
65
+ ),
66
+ "search": ModelFactory.create(
67
+ model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
68
+ model_type="qwen-max",
69
+ api_key=os.getenv("QWEN_API_KEY"),
70
+ url="https://dashscope.aliyuncs.com/compatible-mode/v1",
71
+ model_config_dict={"temperature": 0.4, "max_tokens": 4096},
72
+ ),
73
+ }
74
+
75
+ # Configure toolkits
76
+ tools = [
77
  *WebToolkit(
78
+ headless=False, # Set to True for headless mode (e.g., on remote servers)
79
+ web_agent_model=models["web"],
80
+ planning_agent_model=models["planning"],
81
  ).get_tools(),
82
  *DocumentProcessingToolkit().get_tools(),
83
+ *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
84
+ *ImageAnalysisToolkit(model=models["image"]).get_tools(),
85
+ *SearchToolkit(model=models["search"]).get_tools(),
86
+ *ExcelToolkit().get_tools(),
 
 
87
  ]
 
 
 
 
 
 
88
 
89
+ # Configure agent roles and parameters
90
+ user_agent_kwargs = {"model": models["user"]}
91
+ assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
92
+
93
+ # Configure task parameters
94
  task_kwargs = {
95
+ "task_prompt": question,
96
+ "with_task_specify": False,
97
  }
98
+
99
+ # Create and return the society
100
  society = OwlRolePlaying(
101
  **task_kwargs,
102
+ user_role_name="user",
103
  user_agent_kwargs=user_agent_kwargs,
104
+ assistant_role_name="assistant",
105
  assistant_agent_kwargs=assistant_agent_kwargs,
106
  )
107
 
108
  return society
109
 
110
 
111
+ def main():
112
+ r"""Main function to run the OWL system with an example question."""
113
+ # Example research question
114
+ question = (
115
+ "What was the volume in m^3 of the fish bag that was calculated in "
116
+ "the University of Leicester paper `Can Hiccup Supply Enough Fish "
117
+ "to Maintain a Dragon's Diet?`"
118
+ )
119
+
120
+ # Construct and run the society
121
+ society = construct_society(question)
122
+ answer, chat_history, token_count = run_society(society)
123
+
124
+ # Output the result
125
+ print(f"Answer: {answer}")
126
 
127
 
128
+ if __name__ == "__main__":
129
+ main()
owl/run_qwen.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dotenv import load_dotenv
2
+ load_dotenv()
3
+
4
+ from camel.models import ModelFactory
5
+ from camel.toolkits import (
6
+ CodeExecutionToolkit,
7
+ DocumentProcessingToolkit,
8
+ ExcelToolkit,
9
+ ImageAnalysisToolkit,
10
+ SearchToolkit,
11
+ WebToolkit,
12
+ )
13
+ from camel.types import ModelPlatformType, ModelType
14
+
15
+ from utils import OwlRolePlaying, run_society
16
+
17
+
18
def construct_society(question: str) -> OwlRolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address
            the question.
    """

    # Every role uses the same Qwen configuration; build instances in one
    # place so a config change applies to all roles consistently. Each role
    # still gets its own model instance, matching the original behavior.
    def _make_qwen_model():
        # One Qwen-Max model with deterministic sampling (temperature 0).
        return ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        )

    roles = ("user", "assistant", "web", "planning", "video", "image", "search")
    models = {role: _make_qwen_model() for role in roles}

    # Configure toolkits exposed to the assistant agent.
    tools = [
        *WebToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *DocumentProcessingToolkit().get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        *SearchToolkit(model=models["search"]).get_tools(),
        *ExcelToolkit().get_tools(),
    ]

    # Configure agent roles and parameters.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Configure task parameters; task specification is disabled so the
    # question is used verbatim as the task prompt.
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }

    # Create and return the society.
    society = OwlRolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    return society
101
+
102
+
103
def main():
    r"""Drive the OWL system against one example research question."""
    # The example question the society is asked to resolve.
    question = (
        "What was the volume in m^3 of the fish bag that was calculated in "
        "the University of Leicester paper `Can Hiccup Supply Enough Fish "
        "to Maintain a Dragon's Diet?`"
    )

    # Assemble the society and run it to completion.
    answer, chat_history, token_count = run_society(construct_society(question))

    # Report the final answer.
    print(f"Answer: {answer}")


if __name__ == "__main__":
    main()