Update app.py
app.py CHANGED
@@ -130,7 +130,7 @@ def load_image(image_file):
 def vision_ai_api(image, doc_type):
     """Run the model using a dynamic prompt based on detected doc type."""
     pixel_values = load_image(image).to(torch.float32).to("cpu")
-    generation_config = dict(max_new_tokens=
+    generation_config = dict(max_new_tokens=512, do_sample=True)
 
     question = front_prompt if doc_type == "front" else back_prompt if doc_type == "back" else "Please provide document details."
 
@@ -143,7 +143,7 @@ def vision_ai_api(image, doc_type):
     gc.collect()  # Force garbage collection
     torch.cuda.empty_cache()
 
-    return f'
+    return f'Assistant: {response}'
 
 # ---------------------------
 # PREDICTION PIPELINE