ApsidalSolid4 committed
Commit f92a5da · verified · 1 Parent(s): 225dc24

Update app.py

Files changed (1)
  1. app.py +50 -357
app.py CHANGED
@@ -18,10 +18,6 @@ from openpyxl.utils import get_column_letter
18
  from io import BytesIO
19
  import base64
20
  import hashlib
21
- import requests
22
- import tempfile
23
- from pathlib import Path
24
- import mimetypes
25
 
26
  # Configure logging
27
  logging.basicConfig(level=logging.INFO)
@@ -36,17 +32,6 @@ CONFIDENCE_THRESHOLD = 0.65
36
  BATCH_SIZE = 8 # Reduced batch size for CPU
37
  MAX_WORKERS = 4 # Number of worker threads for processing
38
 
39
- # IMPORTANT: Set PyTorch thread configuration at the module level
40
- # before any parallel work starts
41
- if not torch.cuda.is_available():
42
- # Set thread configuration only once at the beginning
43
- torch.set_num_threads(MAX_WORKERS)
44
- try:
45
- # Only set interop threads if it hasn't been set already
46
- torch.set_num_interop_threads(MAX_WORKERS)
47
- except RuntimeError as e:
48
- logger.warning(f"Could not set interop threads: {str(e)}")
49
-
50
  # Get password hash from environment variable (more secure)
51
  ADMIN_PASSWORD_HASH = os.environ.get('ADMIN_PASSWORD_HASH')
52
 
@@ -56,138 +41,10 @@ if not ADMIN_PASSWORD_HASH:
56
  # Excel file path for logs
57
  EXCEL_LOG_PATH = "/tmp/prediction_logs.xlsx"
58
 
59
- # OCR API settings
60
- OCR_API_KEY = "9e11346f1288957" # This is a partial key - replace with the full one
61
- OCR_API_ENDPOINT = "https://api.ocr.space/parse/image"
62
- OCR_MAX_PDF_PAGES = 3
63
- OCR_MAX_FILE_SIZE_MB = 1
64
-
65
- # Configure logging for OCR module
66
- ocr_logger = logging.getLogger("ocr_module")
67
- ocr_logger.setLevel(logging.INFO)
68
-
69
- class OCRProcessor:
70
- """
71
- Handles OCR processing of image and document files using OCR.space API
72
- """
73
- def __init__(self, api_key: str = OCR_API_KEY):
74
- self.api_key = api_key
75
- self.endpoint = OCR_API_ENDPOINT
76
-
77
- def process_file(self, file_path: str) -> Dict:
78
- """
79
- Process a file using OCR.space API
80
- """
81
- start_time = time.time()
82
- ocr_logger.info(f"Starting OCR processing for file: {os.path.basename(file_path)}")
83
-
84
- # Validate file size
85
- file_size_mb = os.path.getsize(file_path) / (1024 * 1024)
86
- if file_size_mb > OCR_MAX_FILE_SIZE_MB:
87
- ocr_logger.warning(f"File size ({file_size_mb:.2f} MB) exceeds limit of {OCR_MAX_FILE_SIZE_MB} MB")
88
- return {
89
- "success": False,
90
- "error": f"File size ({file_size_mb:.2f} MB) exceeds limit of {OCR_MAX_FILE_SIZE_MB} MB",
91
- "text": ""
92
- }
93
-
94
- # Determine file type and handle accordingly
95
- file_type = self._get_file_type(file_path)
96
- ocr_logger.info(f"Detected file type: {file_type}")
97
-
98
- # Prepare the API request
99
- with open(file_path, 'rb') as f:
100
- file_data = f.read()
101
-
102
- # Set up API parameters
103
- payload = {
104
- 'isOverlayRequired': 'false',
105
- 'language': 'eng',
106
- 'OCREngine': '2', # Use more accurate engine
107
- 'scale': 'true',
108
- 'detectOrientation': 'true',
109
- }
110
-
111
- # For PDF files, check page count limitations
112
- if file_type == 'application/pdf':
113
- ocr_logger.info("PDF document detected, enforcing page limit")
114
- payload['filetype'] = 'PDF'
115
-
116
- # Prepare file for OCR API
117
- files = {
118
- 'file': (os.path.basename(file_path), file_data, file_type)
119
- }
120
-
121
- headers = {
122
- 'apikey': self.api_key,
123
- }
124
-
125
- # Make the OCR API request
126
- try:
127
- ocr_logger.info("Sending request to OCR.space API")
128
- response = requests.post(
129
- self.endpoint,
130
- files=files,
131
- data=payload,
132
- headers=headers
133
- )
134
- response.raise_for_status()
135
- result = response.json()
136
-
137
- # Process the OCR results
138
- if result.get('OCRExitCode') in [1, 2]: # Success or partial success
139
- extracted_text = self._extract_text_from_result(result)
140
- processing_time = time.time() - start_time
141
- ocr_logger.info(f"OCR processing completed in {processing_time:.2f} seconds")
142
-
143
- return {
144
- "success": True,
145
- "text": extracted_text,
146
- "word_count": len(extracted_text.split()),
147
- "processing_time_ms": int(processing_time * 1000)
148
- }
149
- else:
150
- ocr_logger.error(f"OCR API error: {result.get('ErrorMessage', 'Unknown error')}")
151
- return {
152
- "success": False,
153
- "error": result.get('ErrorMessage', 'OCR processing failed'),
154
- "text": ""
155
- }
156
-
157
- except requests.exceptions.RequestException as e:
158
- ocr_logger.error(f"OCR API request failed: {str(e)}")
159
- return {
160
- "success": False,
161
- "error": f"OCR API request failed: {str(e)}",
162
- "text": ""
163
- }
164
-
165
- def _extract_text_from_result(self, result: Dict) -> str:
166
- """
167
- Extract all text from the OCR API result
168
- """
169
- extracted_text = ""
170
-
171
- if 'ParsedResults' in result and result['ParsedResults']:
172
- for parsed_result in result['ParsedResults']:
173
- if parsed_result.get('ParsedText'):
174
- extracted_text += parsed_result['ParsedText']
175
-
176
- return extracted_text
177
-
178
- def _get_file_type(self, file_path: str) -> str:
179
- """
180
- Determine MIME type of a file
181
- """
182
- mime_type, _ = mimetypes.guess_type(file_path)
183
- if mime_type is None:
184
- # Default to binary if MIME type can't be determined
185
- return 'application/octet-stream'
186
- return mime_type
187
-
188
  def is_admin_password(input_text: str) -> bool:
189
  """
190
  Check if the input text matches the admin password using secure hash comparison.
 
191
  """
192
  # Hash the input text
193
  input_hash = hashlib.sha256(input_text.strip().encode()).hexdigest()
@@ -248,6 +105,11 @@ class TextWindowProcessor:
248
 
249
  class TextClassifier:
250
  def __init__(self):
251
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
252
  self.model_name = MODEL_NAME
253
  self.tokenizer = None
@@ -282,7 +144,6 @@ class TextClassifier:
282
 
283
  self.model.eval()
284
 
285
- # [Other TextClassifier methods remain the same as in paste.txt]
286
  def quick_scan(self, text: str) -> Dict:
287
  """Perform a quick scan using simple window analysis."""
288
  if not text.strip():
@@ -392,7 +253,7 @@ class TextClassifier:
392
  for window_idx, indices in enumerate(batch_indices):
393
  center_idx = len(indices) // 2
394
  center_weight = 0.7 # Higher weight for center sentence
395
- edge_weight = 0.3 / (len(indices) - 1) if len(indices) > 1 else 0 # Distribute remaining weight
396
 
397
  for pos, sent_idx in enumerate(indices):
398
  # Apply higher weight to center sentence
@@ -415,10 +276,10 @@ class TextClassifier:
415
 
416
  # Apply minimal smoothing at prediction boundaries
417
  if i > 0 and i < len(sentences) - 1:
418
- prev_human = sentence_scores[i-1]['human_prob'] / max(sentence_appearances[i-1], 1e-10)
419
- prev_ai = sentence_scores[i-1]['ai_prob'] / max(sentence_appearances[i-1], 1e-10)
420
- next_human = sentence_scores[i+1]['human_prob'] / max(sentence_appearances[i+1], 1e-10)
421
- next_ai = sentence_scores[i+1]['ai_prob'] / max(sentence_appearances[i+1], 1e-10)
422
 
423
  # Check if we're at a prediction boundary
424
  current_pred = 'human' if human_prob > ai_prob else 'ai'
@@ -493,72 +354,6 @@ class TextClassifier:
493
  'num_sentences': num_sentences
494
  }
495
 
496
- # Function to handle file upload, OCR processing, and text analysis
497
- def handle_file_upload_and_analyze(file_obj, mode: str, classifier) -> tuple:
498
- """
499
- Handle file upload, OCR processing, and text analysis
500
- """
501
- if file_obj is None:
502
- return (
503
- "No file uploaded",
504
- "Please upload a file to analyze",
505
- "No file uploaded for analysis"
506
- )
507
-
508
- # Create a temporary file with an appropriate extension based on content
509
- content_start = file_obj[:20] # Look at the first few bytes
510
-
511
- # Default to .bin extension
512
- file_ext = ".bin"
513
-
514
- # Try to detect PDF files
515
- if content_start.startswith(b'%PDF'):
516
- file_ext = ".pdf"
517
- # For images, detect by common magic numbers
518
- elif content_start.startswith(b'\xff\xd8'): # JPEG
519
- file_ext = ".jpg"
520
- elif content_start.startswith(b'\x89PNG'): # PNG
521
- file_ext = ".png"
522
- elif content_start.startswith(b'GIF'): # GIF
523
- file_ext = ".gif"
524
-
525
- # Create a temporary file with the detected extension
526
- with tempfile.NamedTemporaryFile(delete=False, suffix=file_ext) as temp_file:
527
- temp_file_path = temp_file.name
528
- # Write uploaded file data to the temporary file
529
- temp_file.write(file_obj)
530
-
531
- try:
532
- # Process the file with OCR
533
- ocr_processor = OCRProcessor()
534
- ocr_result = ocr_processor.process_file(temp_file_path)
535
-
536
- if not ocr_result["success"]:
537
- return (
538
- "OCR Processing Error",
539
- ocr_result["error"],
540
- "Failed to extract text from the uploaded file"
541
- )
542
-
543
- # Get the extracted text
544
- extracted_text = ocr_result["text"]
545
-
546
- # If no text was extracted
547
- if not extracted_text.strip():
548
- return (
549
- "No text extracted",
550
- "The OCR process did not extract any text from the uploaded file.",
551
- "No text was found in the uploaded file"
552
- )
553
-
554
- # Call the original text analysis function with the extracted text
555
- return analyze_text(extracted_text, mode, classifier)
556
-
557
- finally:
558
- # Clean up the temporary file
559
- if os.path.exists(temp_file_path):
560
- os.remove(temp_file_path)
561
-
562
  def initialize_excel_log():
563
  """Initialize the Excel log file if it doesn't exist."""
564
  if not os.path.exists(EXCEL_LOG_PATH):
@@ -586,7 +381,6 @@ def initialize_excel_log():
586
  wb.save(EXCEL_LOG_PATH)
587
  logger.info(f"Initialized Excel log file at {EXCEL_LOG_PATH}")
588
 
589
-
590
  def log_prediction_data(input_text, word_count, prediction, confidence, execution_time, mode):
591
  """Log prediction data to an Excel file in the /tmp directory."""
592
  # Initialize the Excel file if it doesn't exist
@@ -629,7 +423,6 @@ def log_prediction_data(input_text, word_count, prediction, confidence, executio
629
  logger.error(f"Error logging prediction data to Excel: {str(e)}")
630
  return False
631
 
632
-
633
  def get_logs_as_base64():
634
  """Read the Excel logs file and return as base64 for downloading."""
635
  if not os.path.exists(EXCEL_LOG_PATH):
@@ -648,7 +441,6 @@ def get_logs_as_base64():
648
  logger.error(f"Error reading Excel logs: {str(e)}")
649
  return None
650
 
651
-
652
  def analyze_text(text: str, mode: str, classifier: TextClassifier) -> tuple:
653
  """Analyze text using specified mode and return formatted results."""
654
  # Check if the input text matches the admin password using secure comparison
@@ -771,146 +563,47 @@ def analyze_text(text: str, mode: str, classifier: TextClassifier) -> tuple:
771
  # Initialize the classifier globally
772
  classifier = TextClassifier()
773
 
774
- # Create Gradio interface with a small file upload button next to the radio buttons
775
- # Modified approach - simplify by using custom HTML/CSS to achieve the exact layout
776
- def setup_interface():
777
- # Create analyzer functions
778
- def analyze_text_wrapper(text, mode):
779
- return analyze_text(text, mode, classifier)
780
-
781
- def handle_file_upload_wrapper(file_obj, mode):
782
- if file_obj is None:
783
- return analyze_text_wrapper("", mode)
784
- return handle_file_upload_and_analyze(file_obj, mode, classifier)
785
-
786
- def clear_inputs():
787
- return "", None, None, None
788
-
789
- # Create a custom CSS class
790
- css = """
791
- #analyze-btn {
792
- background-color: #FF8C00 !important;
793
- border-color: #FF8C00 !important;
794
- color: white !important;
795
- }
796
-
797
- .radio-with-icon {
798
- display: flex;
799
- align-items: center;
800
- }
801
-
802
- .paperclip-icon {
803
- display: inline-block;
804
- margin-left: 10px;
805
- font-size: 20px;
806
- cursor: pointer;
807
- opacity: 0.7;
808
- }
809
-
810
- .paperclip-icon:hover {
811
- opacity: 1;
812
- }
813
- """
814
-
815
- # Create the interface with custom CSS
816
- with gr.Blocks(title="AI Text Detector", css=css) as demo:
817
- gr.Markdown("# AI Text Detector")
818
-
819
- with gr.Row():
820
- # Left column - Input
821
- with gr.Column():
822
- text_input = gr.Textbox(
823
- lines=8,
824
- placeholder="Enter text to analyze...",
825
- label="Input Text"
826
- )
827
-
828
- gr.Markdown("Analysis Mode")
829
- gr.Markdown("Quick mode for faster analysis. Detailed mode for sentence-level analysis.")
830
-
831
- # Create a visible radio button row
832
- with gr.Row(elem_classes=["radio-with-icon"]):
833
- mode_selection = gr.Radio(
834
- choices=["quick", "detailed"],
835
- value="quick",
836
- label=""
837
- )
838
-
839
- # Create a button that looks like a paperclip and triggers file upload
840
- upload_trigger = gr.Button("📎", elem_classes=["paperclip-icon"])
841
-
842
- # Hidden file upload that will be triggered by the paperclip button
843
- file_upload = gr.File(
844
- file_types=["image", "pdf", "doc", "docx"],
845
- type="binary",
846
- visible=False
847
- )
848
-
849
- # Action buttons
850
- with gr.Row():
851
- clear_btn = gr.Button("Clear")
852
- analyze_btn = gr.Button("Analyze Text", elem_id="analyze-btn")
853
-
854
- # Right column - Results
855
- with gr.Column():
856
- output_html = gr.HTML(label="Highlighted Analysis")
857
- output_sentences = gr.Textbox(label="Sentence-by-Sentence Analysis", lines=10)
858
- output_result = gr.Textbox(label="Overall Result", lines=4)
859
-
860
- # Connect the components
861
- analyze_btn.click(
862
- analyze_text_wrapper,
863
- inputs=[text_input, mode_selection],
864
- outputs=[output_html, output_sentences, output_result]
865
  )
866
-
867
- clear_btn.click(
868
- clear_inputs,
869
- inputs=None,
870
- outputs=[text_input, output_html, output_sentences, output_result]
871
- )
872
-
873
- # Make the paperclip button trigger the file upload
874
- def trigger_upload():
875
- return gr.update(visible=True)
876
-
877
- upload_trigger.click(
878
- trigger_upload,
879
- inputs=None,
880
- outputs=file_upload
881
- )
882
-
883
- # Process the file when uploaded
884
- file_upload.change(
885
- handle_file_upload_wrapper,
886
- inputs=[file_upload, mode_selection],
887
- outputs=[output_html, output_sentences, output_result]
888
- )
889
-
890
- return demo
891
- # Setup the app with CORS middleware
892
- def setup_app():
893
- demo = setup_interface()
894
-
895
- # Get the FastAPI app from Gradio
896
- app = demo.app
897
-
898
- # Add CORS middleware
899
- app.add_middleware(
900
- CORSMiddleware,
901
- allow_origins=["*"], # For development
902
- allow_credentials=True,
903
- allow_methods=["GET", "POST", "OPTIONS"],
904
- allow_headers=["*"],
905
- )
906
-
907
- return demo
908
-
909
- # Initialize the application
910
  if __name__ == "__main__":
911
- demo = setup_app()
912
-
913
- # Start the server
914
  demo.queue()
915
  demo.launch(
916
  server_name="0.0.0.0",
 
18
  from io import BytesIO
19
  import base64
20
  import hashlib
21
 
22
  # Configure logging
23
  logging.basicConfig(level=logging.INFO)
 
32
  BATCH_SIZE = 8 # Reduced batch size for CPU
33
  MAX_WORKERS = 4 # Number of worker threads for processing
34
 
35
  # Get password hash from environment variable (more secure)
36
  ADMIN_PASSWORD_HASH = os.environ.get('ADMIN_PASSWORD_HASH')
37
 
 
41
  # Excel file path for logs
42
  EXCEL_LOG_PATH = "/tmp/prediction_logs.xlsx"
43
 
44
  def is_admin_password(input_text: str) -> bool:
45
  """
46
  Check if the input text matches the admin password using secure hash comparison.
47
+ This prevents the password from being visible in the source code.
48
  """
49
  # Hash the input text
50
  input_hash = hashlib.sha256(input_text.strip().encode()).hexdigest()
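The hunk above hashes the input with SHA-256 and compares it against ADMIN_PASSWORD_HASH taken from the environment. A minimal standalone sketch of that pattern; hmac.compare_digest is an assumption for the comparison step, which this hunk does not show:

import hashlib
import hmac
import os

# Hash of the admin password, supplied via the environment (as in app.py)
ADMIN_PASSWORD_HASH = os.environ.get('ADMIN_PASSWORD_HASH', '')

def is_admin_password(input_text: str) -> bool:
    # Hash the input and compare it to the stored hash.
    # hmac.compare_digest is assumed here for a constant-time comparison.
    input_hash = hashlib.sha256(input_text.strip().encode()).hexdigest()
    return bool(ADMIN_PASSWORD_HASH) and hmac.compare_digest(input_hash, ADMIN_PASSWORD_HASH)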
 
105
 
106
  class TextClassifier:
107
  def __init__(self):
108
+ # Set thread configuration before any model loading or parallel work
109
+ if not torch.cuda.is_available():
110
+ torch.set_num_threads(MAX_WORKERS)
111
+ torch.set_num_interop_threads(MAX_WORKERS)
112
+
113
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
114
  self.model_name = MODEL_NAME
115
  self.tokenizer = None
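For reference, the module-level block removed earlier wrapped set_num_interop_threads in a try/except, since PyTorch raises a RuntimeError if interop threads are configured after parallel work has started; the relocated __init__ version above drops that guard. The removed pattern as a standalone sketch:

import logging
import torch

logger = logging.getLogger(__name__)
MAX_WORKERS = 4  # number of worker threads, as defined in app.py

if not torch.cuda.is_available():
    torch.set_num_threads(MAX_WORKERS)
    try:
        # Raises RuntimeError if parallel work has already started
        torch.set_num_interop_threads(MAX_WORKERS)
    except RuntimeError as e:
        logger.warning(f"Could not set interop threads: {str(e)}")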
 
144
 
145
  self.model.eval()
146
 
 
147
  def quick_scan(self, text: str) -> Dict:
148
  """Perform a quick scan using simple window analysis."""
149
  if not text.strip():
 
253
  for window_idx, indices in enumerate(batch_indices):
254
  center_idx = len(indices) // 2
255
  center_weight = 0.7 # Higher weight for center sentence
256
+ edge_weight = 0.3 / (len(indices) - 1) # Distribute remaining weight
257
 
258
  for pos, sent_idx in enumerate(indices):
259
  # Apply higher weight to center sentence
 
276
 
277
  # Apply minimal smoothing at prediction boundaries
278
  if i > 0 and i < len(sentences) - 1:
279
+ prev_human = sentence_scores[i-1]['human_prob'] / sentence_appearances[i-1]
280
+ prev_ai = sentence_scores[i-1]['ai_prob'] / sentence_appearances[i-1]
281
+ next_human = sentence_scores[i+1]['human_prob'] / sentence_appearances[i+1]
282
+ next_ai = sentence_scores[i+1]['ai_prob'] / sentence_appearances[i+1]
283
 
284
  # Check if we're at a prediction boundary
285
  current_pred = 'human' if human_prob > ai_prob else 'ai'
 
354
  'num_sentences': num_sentences
355
  }
356
 
357
  def initialize_excel_log():
358
  """Initialize the Excel log file if it doesn't exist."""
359
  if not os.path.exists(EXCEL_LOG_PATH):
 
381
  wb.save(EXCEL_LOG_PATH)
382
  logger.info(f"Initialized Excel log file at {EXCEL_LOG_PATH}")
383
 
 
384
  def log_prediction_data(input_text, word_count, prediction, confidence, execution_time, mode):
385
  """Log prediction data to an Excel file in the /tmp directory."""
386
  # Initialize the Excel file if it doesn't exist
 
423
  logger.error(f"Error logging prediction data to Excel: {str(e)}")
424
  return False
425
 
 
426
  def get_logs_as_base64():
427
  """Read the Excel logs file and return as base64 for downloading."""
428
  if not os.path.exists(EXCEL_LOG_PATH):
 
441
  logger.error(f"Error reading Excel logs: {str(e)}")
442
  return None
443
 
 
444
  def analyze_text(text: str, mode: str, classifier: TextClassifier) -> tuple:
445
  """Analyze text using specified mode and return formatted results."""
446
  # Check if the input text matches the admin password using secure comparison
 
563
  # Initialize the classifier globally
564
  classifier = TextClassifier()
565
 
566
+ # Create Gradio interface
567
+ demo = gr.Interface(
568
+ fn=lambda text, mode: analyze_text(text, mode, classifier),
569
+ inputs=[
570
+ gr.Textbox(
571
+ lines=8,
572
+ placeholder="Enter text to analyze...",
573
+ label="Input Text"
574
+ ),
575
+ gr.Radio(
576
+ choices=["quick", "detailed"],
577
+ value="quick",
578
+ label="Analysis Mode",
579
+ info="Quick mode for faster analysis, Detailed mode for sentence-level analysis"
580
  )
581
+ ],
582
+ outputs=[
583
+ gr.HTML(label="Highlighted Analysis"),
584
+ gr.Textbox(label="Sentence-by-Sentence Analysis", lines=10),
585
+ gr.Textbox(label="Overall Result", lines=4)
586
+ ],
587
+ title="AI Text Detector",
588
+ description="Analyze text to detect if it was written by a human or AI. Choose between quick scan and detailed sentence-level analysis. 200+ words suggested for accurate predictions.",
589
+ api_name="predict",
590
+ flagging_mode="never"
591
+ )
592
+
593
+ # Get the FastAPI app from Gradio
594
+ app = demo.app
595
+
596
+ # Add CORS middleware
597
+ app.add_middleware(
598
+ CORSMiddleware,
599
+ allow_origins=["*"], # For development
600
+ allow_credentials=True,
601
+ allow_methods=["GET", "POST", "OPTIONS"],
602
+ allow_headers=["*"],
603
+ )
604
+
605
+ # Ensure CORS is applied before launching
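Because the Interface above sets api_name="predict", the analyzer is also reachable through Gradio's HTTP API. A hedged usage sketch with the gradio_client package; the Space id below is a placeholder, not taken from this page:

from gradio_client import Client

client = Client("username/ai-text-detector")  # hypothetical Space id
html, sentences, overall = client.predict(
    "Some text to analyze...",  # Input Text
    "quick",                    # Analysis Mode
    api_name="/predict"
)
print(overall)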
606
  if __name__ == "__main__":
607
  demo.queue()
608
  demo.launch(
609
  server_name="0.0.0.0",