Pra-tham committed on
Commit
35deaeb
·
1 Parent(s): 48bf4bb

added all features

Files changed (3)
  1. app.py +21 -12
  2. codeexecutor.py +22 -1
  3. temp.py +4 -0
app.py CHANGED
@@ -2,7 +2,9 @@ import gradio as gr
 import ctranslate2
 from transformers import AutoTokenizer
 from huggingface_hub import snapshot_download
-from codeexecutor import get_majority_vote
+from codeexecutor import get_majority_vote, type_check
+import codeexecutor
+
 import re
 import os
 # Define the model and tokenizer loading
@@ -41,7 +43,7 @@ def parse_prediction(prediction):
     if answer is None:
         # If no "Answer:" found, assume last line is the answer
         answer = lines[-1].strip()
-    steps = lines[:-1]
+    steps = lines
     steps_text = '\n'.join(steps).strip()
     return answer, steps_text

@@ -53,10 +55,16 @@ def majority_vote_with_steps(question, num_iterations=10):

     for _ in range(num_iterations):
         prediction = get_prediction(question)
-        answer, steps = parse_prediction(prediction)
-        all_predictions.append(prediction)
-        all_answers.append(answer)
-        steps_list.append(steps)
+        answer, sucess = postprocess_completion(prediction, return_status=True, last_code_block=True)
+        if sucess:
+            all_predictions.append(prediction)
+            all_answers.append(answer)
+            steps_list.append(steps)
+        else:
+            answer, steps = parse_prediction(prediction)
+            all_predictions.append(prediction)
+            all_answers.append(answer)
+            steps_list.append(steps)

     # Get the majority voted answer
     majority_voted_ans = get_majority_vote(all_answers)
@@ -65,13 +73,14 @@ def majority_vote_with_steps(question, num_iterations=10):
     for i, ans in enumerate(all_answers):
         if ans == majority_voted_ans:
             steps_solution = steps_list[i]
+            answer = parse_prediction(steps_solution)
             break
     else:
+        answer = majority_voted_ans
         steps_solution = "No steps found"

-    return majority_voted_ans, steps_solution
+    return answer, steps_solution

-# Gradio interface for user input and output
 def gradio_interface(question, correct_answer):
     final_answer, steps_solution = majority_vote_with_steps(question, iterations)
     return question, final_answer, steps_solution, correct_answer
@@ -166,7 +175,7 @@ custom_css = """
 """

 # Define the directory path
-flagging_dir = "./flagged_data"
+flagging_dir = "./flagged_data"

 # Create the directory if it doesn't exist
 if not os.path.exists(flagging_dir):
@@ -179,9 +188,9 @@ interface = gr.Interface(
         gr.Textbox(label="🧠 Math Question", placeholder="Enter your math question here...", elem_id="math_question"),
     ],
     outputs=[
-        gr.Textbox(label="Majority-Voted Answer", interactive=False),  # Non-editable
-        gr.Textbox(label="Steps to Solve", interactive=False),  # Non-editable
-        gr.Textbox(label="✅ Correct Solution", interactive=True),  # Editable textbox for correct solution
+        gr.Textbox(label="Question", interactive=False),  # Non-editable
+        gr.Textbox(label="Answer", interactive=False),  # Non-editable
+        gr.Textbox(label="Solution", interactive=True),  # Editable textbox for correct solution
     ],
     title="🔒 Math Question Solver",
     description="Enter a math question to get the model's majority-voted answer and steps to solve the problem.",
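Taken together, the app.py hunks change answer selection to try the code executor first, fall back to text parsing, and then majority-vote across samples. Below is a minimal, self-contained sketch of that voting step only; the sample data and the helper name pick_majority_answer are illustrative and not part of the commit.

from collections import Counter

def pick_majority_answer(completions):
    # `completions` holds (answer, steps) pairs gathered over several
    # sampling rounds, as in the updated majority_vote_with_steps().
    answers = [answer for answer, _ in completions]
    majority_answer = Counter(answers).most_common(1)[0][0]

    # Return the steps of the first sample that agrees with the vote.
    for answer, steps in completions:
        if answer == majority_answer:
            return majority_answer, steps
    return majority_answer, "No steps found"

# Illustrative samples: three completions, two agreeing on "4".
samples = [("4", "2 + 2 = 4"), ("5", "2 + 3 = 5"), ("4", "add 2 and 2")]
print(pick_majority_answer(samples))  # ('4', '2 + 2 = 4')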
codeexecutor.py CHANGED
@@ -97,7 +97,7 @@ def execute_completion(executor, completion, return_status, last_code_block):
     success = successes[-1]
     if return_status:
         return output, success
-    return output
+    return output, False


 def postprocess_completion(text, return_status, last_code_block):
@@ -113,3 +113,24 @@ def get_majority_vote(answers):
     c = Counter(answers)
     value, _ = c.most_common()[0]
     return value
+
+
+def type_check(self, expr_str):
+
+
+    expr = sp.sympify(expr_str)
+
+    # Check if the expression is a real number
+    if expr.is_real:
+        return "Real"
+
+    # Check if the expression is a complex number
+    if expr.is_complex:
+        return "Complex"
+
+    # Check if the expression is a polynomial
+    if expr.is_polynomial():
+        return "Polynomial"
+
+    # Otherwise, classify as other
+    return "Other"
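The new type_check helper relies on SymPy's assumption system. For reference, a standalone sketch of the same logic is shown below, with the import sympy as sp it needs added and the unused self parameter dropped; the example inputs are illustrative only.

import sympy as sp

def type_check(expr_str):
    expr = sp.sympify(expr_str)
    if expr.is_real:            # provably real, e.g. "sqrt(2)"
        return "Real"
    if expr.is_complex:         # provably complex, e.g. "2 + 3*I"
        return "Complex"
    if expr.is_polynomial():    # symbolic polynomial, e.g. "x**2 + 1"
        return "Polynomial"
    return "Other"

print(type_check("sqrt(2)"))    # Real
print(type_check("2 + 3*I"))    # Complex
print(type_check("x**2 + 1"))   # Polynomial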
temp.py CHANGED
@@ -3,6 +3,7 @@ import ctranslate2
 from transformers import AutoTokenizer
 from huggingface_hub import snapshot_download
 from codeexecutor import get_majority_vote
+import codeexecutor
 import re

 # Define the model and tokenizer loading
@@ -11,6 +12,9 @@ tokenizer = AutoTokenizer.from_pretrained("AI-MO/NuminaMath-7B-TIR")
 model_path = snapshot_download(repo_id="Makima57/deepseek-math-Numina")
 generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")
 iterations = 10
+executor = PythonREPL()
+
+

 # Function to generate predictions using the model
 def get_prediction(question):
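As context for the get_majority_vote import used in temp.py, the helper (shown in the codeexecutor.py hunk above) is a straightforward Counter-based vote. A minimal usage sketch with made-up answers:

from collections import Counter

def get_majority_vote(answers):
    # Most frequent answer wins; ties fall back to first-seen order.
    value, _ = Counter(answers).most_common()[0]
    return value

print(get_majority_vote(["42", "17", "42", "42", "17"]))  # prints "42"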