Docfile committed
Commit e7761b5 · verified · 1 parent: 1bfe49c

Update app.py
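
This update switches the free `/solved` endpoint from an SSE stream to a single aggregated JSON response (`{"solution": ...}`), adds upfront validation of the uploaded image on both routes, and expands error reporting (blocked prompts, early finish reasons, API errors). A quick way to exercise the new endpoint is a small client script; this is a sketch under stated assumptions — app running locally on port 5000, `requests` installed, and a sample `exercise.png` to upload (all three are examples, not part of this repo):

```python
# Hypothetical smoke test for the non-streaming /solved endpoint.
# Assumptions: app at localhost:5000, `pip install requests`, local exercise.png.
import requests

with open("exercise.png", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/solved",
        files={"image": ("exercise.png", f, "image/png")},
    )

body = resp.json()
# On success the route returns {'solution': ...}; on failure {'error': ...}
print(resp.status_code)
print(body.get("solution") or body.get("error"))
```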

Files changed (1): app.py (+177 -90)
app.py CHANGED
@@ -1,46 +1,73 @@
+# --- START OF CORRECTED app.py ---
+
 from flask import Flask, render_template, request, jsonify, Response, stream_with_context
+# Revert to the original google.genai import and usage
 from google import genai
+# Make sure types is imported from google.genai if needed for model config
 from google.genai import types
 import os
 from PIL import Image
 import io
 import base64
 import json
+import re  # regex, kept in case text post-processing is needed later
 
 app = Flask(__name__)
 
 GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
 
+# Use the original client initialization
 client = genai.Client(
     api_key=GOOGLE_API_KEY,
 )
 
+# Warn early if the API key is missing (good practice)
+if not GOOGLE_API_KEY:
+    print("WARNING: GEMINI_API_KEY environment variable not set.")
+    # Handle this case appropriately, e.g. exit or show an error on the page
+
+# --- Routes for the index and the Pro version (kept for context) ---
 @app.route('/')
 def index():
-    return render_template('index.html')
+    # Assuming index.html is for the Pro version or another page
+    return render_template('index.html')  # or redirect to /free if that is the main page
 
 @app.route('/free')
 def indexx():
+    # This route serves the free version HTML
     return render_template('maj.html')
 
+# --- Original /solve route (Pro version, streaming), kept for reference ---
+# To make the Pro version (/solve) non-streaming as well, apply the same changes as in /solved below
 @app.route('/solve', methods=['POST'])
 def solve():
     try:
+        if 'image' not in request.files or not request.files['image'].filename:
+            return jsonify({'error': 'No image file provided'}), 400
+
         image_data = request.files['image'].read()
-        img = Image.open(io.BytesIO(image_data))
+        if not image_data:
+            return jsonify({'error': 'Empty image file provided'}), 400
+
+        try:
+            img = Image.open(io.BytesIO(image_data))
+        except Exception as img_err:
+            return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
 
         buffered = io.BytesIO()
         img.save(buffered, format="PNG")
-        img_str = base64.b64encode(buffered.getvalue()).decode()
+        img_str = base64.b64encode(buffered.getvalue()).decode()  # keep base64 for this route, as before
 
         def generate():
             mode = 'starting'
             try:
                 response = client.models.generate_content_stream(
-                    model="gemini-2.5-pro-exp-03-25",
+                    model="gemini-2.5-pro-exp-03-25",  # original Pro model name
                     contents=[
+                        # Pass the image as inline_data with a base64 payload, as before
                         {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                         """Résous cet exercice en français avec du LaTeX.
                         Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
                         Présente ta solution de façon claire et espacée."""
                     ],
@@ -54,35 +81,52 @@ def solve():
                     )
                 )
 
+                # Process the streaming response, emitting an SSE event per part type
                 for chunk in response:
-                    for part in chunk.candidates[0].content.parts:
-                        if hasattr(part, 'thought') and part.thought:
-                            if mode != "thinking":
-                                yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
-                                mode = "thinking"
-                        elif hasattr(part, 'executable_code') and part.executable_code:
-                            if mode != "executing_code":
-                                yield 'data: ' + json.dumps({"mode": "executing_code"}) + '\n\n'
-                                mode = "executing_code"
-                            code_block_open = "```python\n"
-                            code_block_close = "\n```"
-                            yield 'data: ' + json.dumps({"content": code_block_open + part.executable_code.code + code_block_close}) + '\n\n'
-                        elif hasattr(part, 'code_execution_result') and part.code_execution_result:
-                            if mode != "code_result":
-                                yield 'data: ' + json.dumps({"mode": "code_result"}) + '\n\n'
-                                mode = "code_result"
-                            result_block_open = "Résultat d'exécution:\n```\n"
-                            result_block_close = "\n```"
-                            yield 'data: ' + json.dumps({"content": result_block_open + part.code_execution_result.output + result_block_close}) + '\n\n'
-                        else:
-                            if mode != "answering":
-                                yield 'data: ' + json.dumps({"mode": "answering"}) + '\n\n'
-                                mode = "answering"
-                            if hasattr(part, 'text') and part.text:
-                                yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
+                    if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
+                        for part in chunk.candidates[0].content.parts:
+                            # Same mode-switching logic as before, now guarded against empty chunks
+                            if hasattr(part, 'thought') and part.thought:
+                                if mode != "thinking":
+                                    yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
+                                    mode = "thinking"
+                            elif hasattr(part, 'executable_code') and part.executable_code:
+                                if mode != "executing_code":
+                                    yield 'data: ' + json.dumps({"mode": "executing_code"}) + '\n\n'
+                                    mode = "executing_code"
+                                code_block_open = "```python\n"
+                                code_block_close = "\n```"
+                                yield 'data: ' + json.dumps({"content": code_block_open + part.executable_code.code + code_block_close}) + '\n\n'
+                            elif hasattr(part, 'code_execution_result') and part.code_execution_result:
+                                if mode != "code_result":
+                                    yield 'data: ' + json.dumps({"mode": "code_result"}) + '\n\n'
+                                    mode = "code_result"
+                                result_block_open = "Résultat d'exécution:\n```\n"
+                                result_block_close = "\n```"
+                                yield 'data: ' + json.dumps({"content": result_block_open + part.code_execution_result.output + result_block_close}) + '\n\n'
+                            else:  # assume a text part
+                                if mode != "answering":
+                                    yield 'data: ' + json.dumps({"mode": "answering"}) + '\n\n'
+                                    mode = "answering"
+                                if hasattr(part, 'text') and part.text:
+                                    yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
+                    # Chunks without parts: report blocked prompts or abnormal finishes
+                    elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
+                        error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
+                        print(error_msg)
+                        yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
+                        break  # stop processing on a blocked prompt
+                    elif chunk.candidates and chunk.candidates[0].finish_reason:
+                        finish_reason = chunk.candidates[0].finish_reason.name
+                        if finish_reason != 'STOP':
+                            error_msg = f"Generation finished early: {finish_reason}"
+                            print(error_msg)
+                            yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
+                            break  # stop processing on an abnormal finish
+
 
             except Exception as e:
-                print(f"Error during generation: {e}")
+                print(f"Error during streaming generation: {e}")
                 yield 'data: ' + json.dumps({"error": str(e)}) + '\n\n'
 
         return Response(
@@ -95,78 +139,121 @@ def solve():
         )
 
     except Exception as e:
-        return jsonify({'error': str(e)}), 500
+        print(f"Error in /solve endpoint: {e}")
+        # Return a JSON error for the fetch API if streaming setup fails
+        return jsonify({'error': f'Failed to process request: {str(e)}'}), 500
 
+
+# --- MODIFIED /solved route (free version, non-streaming) using the original SDK syntax ---
 @app.route('/solved', methods=['POST'])
 def solved():
     try:
+        if 'image' not in request.files or not request.files['image'].filename:
+            return jsonify({'error': 'No image file provided'}), 400
+
         image_data = request.files['image'].read()
-        img = Image.open(io.BytesIO(image_data))
+        if not image_data:
+            return jsonify({'error': 'Empty image file provided'}), 400
 
-        buffered = io.BytesIO()
+        try:
+            img = Image.open(io.BytesIO(image_data))
+        except Exception as img_err:
+            return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
+
+        buffered = io.BytesIO()
         img.save(buffered, format="PNG")
         img_str = base64.b64encode(buffered.getvalue()).decode()
 
-        def generate():
-            mode = 'starting'
-            try:
-                response = client.models.generate_content_stream(
-                    model="gemini-2.5-flash-preview-04-17",
-                    contents=[
-                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
-                        """Résous cet exercice en français avec du rendu latex.
-                        Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
-                        Présente ta solution de façon claire et espacée."""
-                    ],
-                    config=types.GenerateContentConfig(
-                        tools=[types.Tool(
-                            code_execution=types.ToolCodeExecution()
-                        )]
-                    )
-                )
-
-                for chunk in response:
-                    for part in chunk.candidates[0].content.parts:
-                        if hasattr(part, 'thought') and part.thought:
-                            if mode != "thinking":
-                                yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
-                                mode = "thinking"
-                        elif hasattr(part, 'executable_code') and part.executable_code:
-                            if mode != "executing_code":
-                                yield 'data: ' + json.dumps({"mode": "executing_code"}) + '\n\n'
-                                mode = "executing_code"
-                            code_block_open = "```python\n"
-                            code_block_close = "\n```"
-                            yield 'data: ' + json.dumps({"content": code_block_open + part.executable_code.code + code_block_close}) + '\n\n'
-                        elif hasattr(part, 'code_execution_result') and part.code_execution_result:
-                            if mode != "code_result":
-                                yield 'data: ' + json.dumps({"mode": "code_result"}) + '\n\n'
-                                mode = "code_result"
-                            result_block_open = "Résultat d'exécution:\n```\n"
-                            result_block_close = "\n```"
-                            yield 'data: ' + json.dumps({"content": result_block_open + part.code_execution_result.output + result_block_close}) + '\n\n'
-                        else:
-                            if mode != "answering":
-                                yield 'data: ' + json.dumps({"mode": "answering"}) + '\n\n'
-                                mode = "answering"
-                            if hasattr(part, 'text') and part.text:
-                                yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
-
-            except Exception as e:
-                print(f"Error during generation: {e}")
-                yield 'data: ' + json.dumps({"error": str(e)}) + '\n\n'
-
-        return Response(
-            stream_with_context(generate()),
-            mimetype='text/event-stream',
-            headers={
-                'Cache-Control': 'no-cache',
-                'X-Accel-Buffering': 'no'
-            }
-        )
+        # Use the non-streaming generate_content method with the free-tier model name
+        model_name = "gemini-2.5-flash-preview-04-17"  # original free model name
+
+        # Prepare the contents using inline_data with a base64 string, as in the original code
+        contents = [
+            {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
+            """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
+            Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
+            Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
+        ]
+
+        # Call the non-streaming generation method on the original client object
+        response = client.models.generate_content(
+            model=model_name,
+            contents=contents,
+            config=types.GenerateContentConfig(
+                # thinking_config removed: it is not relevant for non-streaming output
+                tools=[types.Tool(
+                    code_execution=types.ToolCodeExecution()
+                )]
+            )
+            # Note: no streaming call here; generate_content returns the full response
+        )
+
+        # Aggregate the response parts into a single string
+        full_solution = ""
+        if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
+            for part in response.candidates[0].content.parts:
+                # Process parts based on attribute existence
+                if hasattr(part, 'text') and part.text:
+                    full_solution += part.text
+                elif hasattr(part, 'executable_code') and part.executable_code:
+                    # Format the code block as Markdown, which the frontend expects
+                    full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
+                elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
+                    # Format the execution result block as Markdown
+                    output_str = part.code_execution_result.output
+                    full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
+                # Other part types (e.g. function_call) could be handled here;
+                # 'thought' parts are intentionally ignored
+
+        # If nothing was generated, explain why
+        if not full_solution.strip():
+            finish_reason = response.candidates[0].finish_reason.name if response.candidates and response.candidates[0].finish_reason else "UNKNOWN"
+            safety_ratings = response.candidates[0].safety_ratings if response.candidates else []
+            print(f"Generation finished with reason: {finish_reason}, Safety: {safety_ratings}")
+            if finish_reason == 'SAFETY':
+                full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
+            elif finish_reason == 'RECITATION':
+                full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
+            elif response.prompt_feedback and response.prompt_feedback.block_reason:
+                block_reason = response.prompt_feedback.block_reason.name
+                full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
+            else:
+                full_solution = "Désolé, je n'ai pas pu générer de solution complète pour cette image."
+
+        # Return the complete solution as JSON, stripped of leading/trailing whitespace
+        return jsonify({'solution': full_solution.strip()})
+
+    # Catch API errors from the SDK (exposed as genai.errors.APIError in google-genai)
+    except genai.errors.APIError as api_error:
+        print(f"GenAI API Error: {api_error}")
+        # Check whether the error detail indicates a safety block
+        error_detail = str(api_error)
+        if "safety" in error_detail.lower():
+            return jsonify({'error': 'Le contenu a été bloqué pour des raisons de sécurité par l\'API.'}), 400
+        elif "blocked" in error_detail.lower():
+            return jsonify({'error': 'La requête a été bloquée par l\'API.'}), 400
+        else:
+            return jsonify({'error': f'Erreur de l\'API GenAI: {error_detail}'}), 500
     except Exception as e:
-        return jsonify({'error': str(e)}), 500
+        # Log the full traceback for debugging; return a generic message to the user
+        import traceback
+        print(f"Error in /solved endpoint: {e}")
+        print(traceback.format_exc())
+        return jsonify({'error': f'Une erreur interne est survenue lors du traitement: {str(e)}'}), 500
 
 if __name__ == '__main__':
-    app.run(debug=True)
+    # host='0.0.0.0' makes the app reachable on the local network; remove debug=True in production
+    app.run(debug=True, host='0.0.0.0', port=5000)
+
+# --- END OF CORRECTED app.py ---
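
The streaming `/solve` route keeps its original behavior: it emits a `text/event-stream` response where each `data:` line carries a JSON object with either a `mode` switch, a `content` fragment, or an `error`. A minimal consumer sketch, under the same assumptions as the snippet above (local server, `requests` installed, sample image; all names are illustrative):

```python
# Sketch of consuming the /solve SSE stream; parsing is deliberately minimal
# and only handles the single-line `data:` events this app emits.
import json
import requests

with open("exercise.png", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/solve",
        files={"image": ("exercise.png", f, "image/png")},
        stream=True,  # keep the connection open and read events as they arrive
    )

for line in resp.iter_lines(decode_unicode=True):
    if not line or not line.startswith("data: "):
        continue  # skip blank separators between events
    event = json.loads(line[len("data: "):])
    if "mode" in event:
        print(f"\n[mode -> {event['mode']}]")
    elif "content" in event:
        print(event["content"], end="", flush=True)
    elif "error" in event:
        print(f"\n[error: {event['error']}]")
```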