fecia committed
Commit 54b9d20 · verified · 1 Parent(s): 6045f26

Update app.py

Files changed (1):
  1. app.py +760 -229

app.py CHANGED
@@ -15,7 +15,8 @@ import pandas as pd  # To format the output as a table
 
 # --- Configuration ---
 MODEL_REPO_ID = "google/cxr-foundation"
-MODEL_DOWNLOAD_DIR = './hf_cxr_foundation_space'
 SIMILARITY_DIFFERENCE_THRESHOLD = 0.1
 POSITIVE_SIMILARITY_THRESHOLD = 0.1
 print(f"Using thresholds: Comp Δ={SIMILARITY_DIFFERENCE_THRESHOLD}, Simp τ={POSITIVE_SIMILARITY_THRESHOLD}")
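For reference, the two assessments these thresholds drive reduce to a pair of comparisons. A minimal, self-contained sketch with made-up similarity values (not part of the commit):

```python
# Toy numbers only: sim_pos/sim_neg stand in for the cosine similarities computed below.
sim_pos, sim_neg = 0.32, 0.15
difference = sim_pos - sim_neg                 # 0.17
comp = "PASS" if difference > 0.1 else "FAIL"  # comparative rule (Comp Δ) -> "PASS"
simp = "PASS" if sim_pos > 0.1 else "FAIL"     # simplified rule (Simp τ)  -> "PASS"
print(comp, simp)
```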
@@ -30,405 +31,927 @@ criteria_list_negative = [
 "cropped image", "scapulae overlying lungs", "blurred image", "obscuring artifact"
 ]
 
-# --- Helper functions (SAME as in the previous Gradio version) ---
-# @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
-# def preprocess_text(text):
-#     return bert_preprocessor_global(text)  # Assumes bert_preprocessor_global is loaded
 
 def bert_tokenize(text, preprocessor):
-    if preprocessor is None: raise ValueError("BERT preprocessor is not loaded.")
     if not isinstance(text, str): text = str(text)
-    out = preprocessor(tf.constant([text.lower()]))
     ids = out['input_word_ids'].numpy().astype(np.int32)
-    masks = out['input_mask'].numpy().astype(np.float32)
     paddings = 1.0 - masks
-    end_token_idx = (ids == 102)
     ids[end_token_idx] = 0
-    paddings[end_token_idx] = 1.0
     if ids.ndim == 2: ids = np.expand_dims(ids, axis=1)
-    if paddings.ndim == 2: paddings = np.expand_dims(paddings, axis=1)
-    expected_shape = (1, 1, 128)
     if ids.shape != expected_shape:
-        if ids.shape == (1, 128): ids = np.expand_dims(ids, axis=1)
-        else: raise ValueError(f"Wrong shape for ids: {ids.shape}, expected {expected_shape}")
-    if paddings.shape != expected_shape:
-        if paddings.shape == (1, 128): paddings = np.expand_dims(paddings, axis=1)
         else: raise ValueError(f"Wrong shape for paddings: {paddings.shape}, expected {expected_shape}")
     return ids, paddings
 
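For orientation, the contract this function aims for, as a comment-only sketch (the prompt is a placeholder; the (1, 1, 128) layout is the shape the QFormer signature used below expects):

```python
# Hypothetical call; assumes the TF Hub preprocessor loaded later in this file.
# ids, paddings = bert_tokenize("sharp image", bert_preprocessor_global)
# ids.shape      == (1, 1, 128)  # int32 token ids, with the [SEP] token zeroed out
# paddings.shape == (1, 1, 128)  # float32, 1.0 on padding positions, 0.0 on real tokens
```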
-def png_to_tfexample(image_array: np.ndarray) -> tf.train.Example:
-    if image_array.ndim == 3 and image_array.shape[2] == 1:
-        image_array = np.squeeze(image_array, axis=2)
     elif image_array.ndim != 2:
-        raise ValueError(f'Array must be 2-D. Dimensions: {image_array.ndim}')
     image = image_array.astype(np.float32)
-    min_val, max_val = image.min(), image.max()
-    if max_val <= min_val:
-        if image_array.dtype == np.uint8 or (min_val >= 0 and max_val <= 255):
-            pixel_array = image.astype(np.uint8); bitdepth = 8
-        else:
-            pixel_array = np.zeros_like(image, dtype=np.uint16); bitdepth = 16
     else:
-        image -= min_val
-        current_max = max_val - min_val
         if image_array.dtype != np.uint8:
             image *= 65535 / current_max
-            pixel_array = image.astype(np.uint16); bitdepth = 16
         else:
-            image *= 255 / current_max
-            pixel_array = image.astype(np.uint8); bitdepth = 8
-    output = io.BytesIO()
-    png.Writer(width=pixel_array.shape[1], height=pixel_array.shape[0], greyscale=True, bitdepth=bitdepth).write(output, pixel_array.tolist())
     example = tf.train.Example()
-    features = example.features.feature
-    features['image/encoded'].bytes_list.value.append(output.getvalue())
     features['image/format'].bytes_list.value.append(b'png')
     return example
 
-def generate_image_embedding(img_np, elixrc_infer, qformer_infer):
-    if elixrc_infer is None or qformer_infer is None: raise ValueError("ELIXR-C or QFormer models not loaded.")
     try:
-        serialized_img_tf_example = png_to_tfexample(img_np).SerializeToString()
-        elixrc_output = elixrc_infer(input_example=tf.constant([serialized_img_tf_example]))
         elixrc_embedding = elixrc_output['feature_maps_0'].numpy()
-        qformer_input_img = {
             'image_feature': elixrc_embedding.tolist(),
-            'ids': np.zeros((1, 1, 128), dtype=np.int32).tolist(),
-            'paddings': np.ones((1, 1, 128), dtype=np.float32).tolist(),
         }
-        qformer_output_img = qformer_infer(**qformer_input_img)
-        image_embedding = qformer_output_img['all_contrastive_img_emb'].numpy()
-        if image_embedding.ndim > 2:
-            image_embedding = np.mean(image_embedding, axis=tuple(range(1, image_embedding.ndim - 1)))
-        if image_embedding.ndim == 1: image_embedding = np.expand_dims(image_embedding, axis=0)
-        if image_embedding.ndim != 2: raise ValueError(f"Final embedding does not have 2 dims: {image_embedding.shape}")
         return image_embedding
-    except Exception as e:
-        print(f"Error generating image embedding: {e}"); traceback.print_exc(); raise
 
-def calculate_similarities_and_classify(image_embedding, bert_preprocessor, qformer_infer):
-    if image_embedding is None: raise ValueError("Image embedding is None.")
-    if bert_preprocessor is None: raise ValueError("BERT preprocessor is None.")
-    if qformer_infer is None: raise ValueError("QFormer is None.")
     detailed_results = {}
-    print("\n--- Computing similarities ---")
     for i in range(len(criteria_list_positive)):
-        positive_text, negative_text = criteria_list_positive[i], criteria_list_negative[i]
-        criterion_name = positive_text
-        print(f"Processing: \"{criterion_name}\"")
-        similarity_positive, similarity_negative, difference = None, None, None
         classification_comp, classification_simp = "ERROR", "ERROR"
         try:
-            tokens_pos, paddings_pos = bert_tokenize(positive_text, bert_preprocessor)
-            qformer_input_pos = {'image_feature': np.zeros([1, 8, 8, 1376], dtype=np.float32).tolist(), 'ids': tokens_pos.tolist(), 'paddings': paddings_pos.tolist()}
-            text_embedding_pos = qformer_infer(**qformer_input_pos)['contrastive_txt_emb'].numpy()
-            if text_embedding_pos.ndim == 1: text_embedding_pos = np.expand_dims(text_embedding_pos, axis=0)
 
             tokens_neg, paddings_neg = bert_tokenize(negative_text, bert_preprocessor)
-            qformer_input_neg = {'image_feature': np.zeros([1, 8, 8, 1376], dtype=np.float32).tolist(), 'ids': tokens_neg.tolist(), 'paddings': paddings_neg.tolist()}
-            text_embedding_neg = qformer_infer(**qformer_input_neg)['contrastive_txt_emb'].numpy()
-            if text_embedding_neg.ndim == 1: text_embedding_neg = np.expand_dims(text_embedding_neg, axis=0)
 
-            if image_embedding.shape[1] != text_embedding_pos.shape[1]: raise ValueError(f"Dim mismatch: Img ({image_embedding.shape[1]}) vs Pos ({text_embedding_pos.shape[1]})")
-            if image_embedding.shape[1] != text_embedding_neg.shape[1]: raise ValueError(f"Dim mismatch: Img ({image_embedding.shape[1]}) vs Neg ({text_embedding_neg.shape[1]})")
 
-            similarity_positive = cosine_similarity(image_embedding, text_embedding_pos)[0][0]
-            similarity_negative = cosine_similarity(image_embedding, text_embedding_neg)[0][0]
 
-            difference = similarity_positive - similarity_negative
-            classification_comp = "PASS" if difference > SIMILARITY_DIFFERENCE_THRESHOLD else "FAIL"
-            classification_simp = "PASS" if similarity_positive > POSITIVE_SIMILARITY_THRESHOLD else "FAIL"
-            print(f"  Sim(+)={similarity_positive:.4f}, Sim(-)={similarity_negative:.4f}, Diff={difference:.4f} -> Comp:{classification_comp}, Simp:{classification_simp}")
-        except Exception as e:
-            print(f"  ERROR criterion '{criterion_name}': {e}"); traceback.print_exc()
         detailed_results[criterion_name] = {
-            'positive_prompt': positive_text, 'negative_prompt': negative_text,
-            'similarity_positive': float(similarity_positive) if similarity_positive is not None else None,
-            'similarity_negative': float(similarity_negative) if similarity_negative is not None else None,
-            'difference': float(difference) if difference is not None else None,
-            'classification_comparative': classification_comp, 'classification_simplified': classification_simp
         }
-    return detailed_results
 
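Note the `np.zeros([1, 8, 8, 1376])` dummy image feature above: text embeddings come from calling the same QFormer signature with a zeroed image input. A sketch of one such call (assumes the globals loaded later in this file; illustrative only):

```python
import numpy as np

# Hypothetical: embed a single prompt; shapes mirror the calls in this file.
tokens, paddings = bert_tokenize("sharp image", bert_preprocessor_global)
text_emb = qformer_infer_global(
    image_feature=np.zeros([1, 8, 8, 1376], dtype=np.float32).tolist(),  # dummy image
    ids=tokens.tolist(),
    paddings=paddings.tolist(),
)['contrastive_txt_emb'].numpy()
```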
 # --- Global model loading ---
-print("--- Starting global model loading ---")
 start_time = time.time()
 models_loaded = False
 bert_preprocessor_global = None
-elixrc_infer_global = None
 qformer_infer_global = None
-try:
-    # Add a token if needed (for private or gated repos)
-    hf_token = os.environ.get("HF_TOKEN")  # Read the token from the Space secrets
-    # if hf_token:
-    #     print("Using HF_TOKEN for authentication.")
-    #     HfFolder.save_token(hf_token)
 
-    os.makedirs(MODEL_DOWNLOAD_DIR, exist_ok=True)
-    print(f"Downloading/verifying models in: {MODEL_DOWNLOAD_DIR}")
     snapshot_download(repo_id=MODEL_REPO_ID, local_dir=MODEL_DOWNLOAD_DIR,
-                      allow_patterns=['elixr-c-v2-pooled/*', 'pax-elixr-b-text/*'],
                       local_dir_use_symlinks=False, token=hf_token)  # Pass the token here
     print("Models downloaded/verified.")
 
-    print("Loading BERT preprocessor...")
-    bert_preprocess_handle = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
-    bert_preprocessor_global = tf_hub.KerasLayer(bert_preprocess_handle)
-    print("BERT preprocessor loaded.")
-
-    print("Loading ELIXR-C...")
-    elixrc_model_path = os.path.join(MODEL_DOWNLOAD_DIR, 'elixr-c-v2-pooled')
-    elixrc_model = tf.saved_model.load(elixrc_model_path)
     elixrc_infer_global = elixrc_model.signatures['serving_default']
-    print("ELIXR-C model loaded.")
 
-    print("Loading QFormer (ELIXR-B Text)...")
-    qformer_model_path = os.path.join(MODEL_DOWNLOAD_DIR, 'pax-elixr-b-text')
-    qformer_model = tf.saved_model.load(qformer_model_path)
     qformer_infer_global = qformer_model.signatures['serving_default']
     print("QFormer model loaded.")
 
     models_loaded = True
-    end_time = time.time()
-    print(f"--- Models loaded globally in {end_time - start_time:.2f} seconds ---")
 except Exception as e:
     models_loaded = False
-    print(f"--- CRITICAL ERROR DURING GLOBAL MODEL LOADING ---"); print(e); traceback.print_exc()
 
-# --- Main processing function for Gradio ---
-def assess_quality_and_update_ui(image_pil):
-    """Processes the image and returns updates for the UI."""
     if not models_loaded:
-        raise gr.Error("Error: The models could not be loaded. The application cannot process images.")
-    if image_pil is None:
         # Return default/empty values and control visibility
         return (
-            gr.update(visible=True),   # Show welcome
-            gr.update(visible=False),  # Hide results
-            None,                      # Clear output image
             gr.update(value="N/A"),    # Clear label
-            pd.DataFrame(),            # Clear dataframe
             None                       # Clear JSON
         )
 
     print("\n--- Starting evaluation for a new image ---")
-    start_process_time = time.time()
     try:
-        # 1. Convert to NumPy
-        img_np = np.array(image_pil.convert('L'))
-        # 2. Generate the embedding
-        image_embedding = generate_image_embedding(img_np, elixrc_infer_global, qformer_infer_global)
         # 3. Classify
-        detailed_results = calculate_similarities_and_classify(image_embedding, bert_preprocessor_global, qformer_infer_global)
-        # 4. Format the results
-        output_data, passed_count, total_count = [], 0, 0
-        for criterion, details in detailed_results.items():
             total_count += 1
-            sim_pos = details['similarity_positive']
-            sim_neg = details['similarity_negative']
             diff = details['difference']
             comp = details['classification_comparative']
             simp = details['classification_simplified']
-            output_data.append([ criterion, f"{sim_pos:.4f}" if sim_pos else "N/A",
-                                 f"{sim_neg:.4f}" if sim_neg else "N/A", f"{diff:.4f}" if diff else "N/A", comp, simp ])
             if comp == "PASS": passed_count += 1
-        df_results = pd.DataFrame(output_data, columns=[ "Criterion", "Sim (+)", "Sim (-)", "Difference", "Assessment (Comp)", "Assessment (Simp)" ])
-        overall_quality = "Error"; pass_rate = 0
         if total_count > 0:
-            pass_rate = passed_count / total_count
-            if pass_rate >= 0.85: overall_quality = "Excellent"
-            elif pass_rate >= 0.70: overall_quality = "Good"
-            elif pass_rate >= 0.50: overall_quality = "Fair"
             else: overall_quality = "Poor"
-        quality_label = f"{overall_quality} ({passed_count}/{total_count} passed)"
         end_process_time = time.time()
-        print(f"--- Evaluation completed in {end_process_time - start_process_time:.2f} sec ---")
         # Return the results and update visibility
         return (
-            gr.update(visible=False),        # Hide welcome
             gr.update(visible=True),         # Show results
-            image_pil,                       # Show the processed image
             gr.update(value=quality_label),  # Update the label
             df_results,                      # Update the dataframe
-            detailed_results                 # Update the JSON
         )
-    except Exception as e:
-        print(f"Error during Gradio processing: {e}"); traceback.print_exc()
-        raise gr.Error(f"Error processing image: {str(e)}")
 
 # --- UI reset function ---
-def reset_ui():
     print("Resetting UI...")
     return (
-        gr.update(visible=True),   # Show welcome
         gr.update(visible=False),  # Hide results
-        None,                      # Clear input image
         None,                      # Clear output image
         gr.update(value="N/A"),    # Clear label
-        pd.DataFrame(),            # Clear dataframe
         None                       # Clear JSON
     )
 
 # --- Define custom dark theme ---
-# Inspired by the original HTML colors and Tailwind dark grays/blues
-dark_theme = gr.themes.Default(
     primary_hue=gr.themes.colors.blue,    # Blue as the primary color
-    secondary_hue=gr.themes.colors.blue,  # Secondary blue
-    neutral_hue=gr.themes.colors.gray,    # Neutral gray
-    font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
-    font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "ui-monospace", "Consolas", "monospace"],
-).set(
     # Backgrounds
     body_background_fill="#111827",      # Very dark main background (gray-900)
-    background_fill_primary="#1f2937",   # Component background (gray-800)
-    background_fill_secondary="#374151", # Secondary background (gray-700)
-    block_background_fill="#1f2937",     # Block background (gray-800)
 
     # Text
-    body_text_color="#d1d5db",           # Light main text (gray-300)
-    text_color_subdued="#9ca3af",        # Secondary text (gray-400)
     block_label_text_color="#d1d5db",    # Block labels (gray-300)
-    block_title_text_color="#ffffff",    # Block titles (white)
 
-    # Borders
-    border_color_accent="#374151",       # Border (gray-700)
-    border_color_primary="#4b5563",      # Primary border (gray-600)
 
     # Buttons and interactive elements
-    button_primary_background_fill="*primary_600",  # Use the primary color (blue)
     button_primary_text_color="#ffffff",
-    button_secondary_background_fill="*neutral_700",
     button_secondary_text_color="#ffffff",
-    input_background_fill="#374151",     # Input background (gray-700)
-    input_border_color="#4b5563",        # Input border (gray-600)
     input_text_color="#ffffff",          # Input text
 
     # Shadows and radii
-    shadow_drop="rgba(0,0,0,0.2) 0px 2px 4px",
-    block_shadow="rgba(0,0,0,0.2) 0px 2px 5px",
-    radius_size="*radius_lg",            # Rounded corners
 )
 
-# --- Define the Gradio interface with Blocks and the theme ---
 with gr.Blocks(theme=dark_theme, title="CXR Quality Assessment") as demo:
     # --- Header ---
     with gr.Row():
         gr.Markdown(
             """
-            # <span style="color: #e5e7eb;">CXR Quality Assessment</span>
-            <p style="color: #9ca3af;">Evaluate chest X-ray technical quality using AI (ELIXR family)</p>
-            """,  # Use white/light gray for the header text
-            elem_id="app-header"
         )
 
     # --- Main content (two columns) ---
-    with gr.Row(equal_height=False):  # Allow different heights
 
-        # --- Left column (upload) ---
-        with gr.Column(scale=1, min_width=350):
-            gr.Markdown("### 1. Upload Image", elem_id="upload-title")
-            input_image = gr.Image(type="pil", label="Upload Chest X-ray", height=300)  # Fixed height for the input image
             with gr.Row():
-                analyze_btn = gr.Button("Analyze Image", variant="primary", scale=2)
                 reset_btn = gr.Button("Reset", variant="secondary", scale=1)
-            # Add examples if you have sample images
             # gr.Examples(
-            #     examples=[os.path.join("examples", "sample_cxr.png")],
             #     inputs=input_image, label="Example CXR"
             # )
             gr.Markdown(
-                "<p style='color:#9ca3af; font-size:0.9em;'>Model loading on startup takes ~1 min. Analysis takes ~15-40 sec.</p>"
             )
 
 
         # --- Right column (welcome / results) ---
-        with gr.Column(scale=2):
 
-            # --- Welcome block (initially visible) ---
-            with gr.Column(visible=True, elem_id="welcome-section") as welcome_block:
-                gr.Markdown(
                     """
                     ### Welcome!
-                    Upload a chest X-ray image (PNG, JPG, etc.) on the left panel and click "Analyze Image".
 
-                    The system will evaluate its technical quality based on 7 standard criteria using the ELIXR model family.
                     The results will appear here once the analysis is complete.
-                    """, elem_id="welcome-text"
                 )
-            # You could add an icon or image here if you want
-            # gr.Image("path/to/welcome_icon.png", interactive=False, show_label=False, show_download_button=False)
 
-            # --- Results block (initially hidden) ---
-            with gr.Column(visible=False, elem_id="results-section") as results_block:
-                gr.Markdown("### 2. Quality Assessment Results", elem_id="results-title")
-                with gr.Row():  # Row for the output image and the summary
                     with gr.Column(scale=1):
                         output_image = gr.Image(type="pil", label="Analyzed Image", interactive=False)
-                    with gr.Column(scale=1):
-                        gr.Markdown("#### Summary", elem_id="summary-title")
                         output_label = gr.Label(value="N/A", label="Overall Quality Estimate", elem_id="quality-label")
-                        # We could add more summary text here if we wanted
 
-                gr.Markdown("#### Detailed Criteria Evaluation", elem_id="detailed-title")
-                output_dataframe = gr.DataFrame(
-                    headers=["Criterion", "Sim (+)", "Sim (-)", "Difference", "Assessment (Comp)", "Assessment (Simp)"],
                     label=None,  # Remove the redundant label
                     wrap=True,
-                    # Height is now handled better automatically or with CSS
-                    # row_count=(7, "dynamic")  # Show 7 rows, allow scrolling if there are more
-                    max_rows=10,  # Limit visible rows with scrolling
                     overflow_row_behaviour="show_ends",  # Show start/end while scrolling
-                    interactive=False,  # Not editable
                     elem_id="results-dataframe"
                 )
-                with gr.Accordion("Raw JSON Output (for debugging)", open=False):
                     output_json = gr.JSON(label=None)
 
     gr.Markdown(
         f"""
         #### Technical Notes
-        * **Criterion:** Quality aspect evaluated.
         * **Sim (+/-):** Cosine similarity with positive/negative prompt.
         * **Difference:** Sim (+) - Sim (-).
-        * **Assessment (Comp):** PASS if Difference > {SIMILARITY_DIFFERENCE_THRESHOLD}. (Main Result)
-        * **Assessment (Simp):** PASS if Sim (+) > {POSITIVE_SIMILARITY_THRESHOLD}.
         """, elem_id="notes-text"
     )
 
     # --- Footer ---
     gr.Markdown(
         """
-        ----
-        <p style='text-align:center; color:#9ca3af; font-size:0.8em;'>
-        CXR Quality Assessment Tool | Model: google/cxr-foundation | Interface: Gradio
         </p>
         """, elem_id="app-footer"
-    )
 
 
     # --- Event wiring ---
     analyze_btn.click(
-        fn=assess_quality_and_update_ui,
-        inputs=[input_image],
         outputs=[
-            welcome_block,     # -> updates welcome visibility
             results_block,     # -> updates results visibility
-            output_image,      # -> shows the analyzed image
             output_label,      # -> updates the summary label
             output_dataframe,  # -> updates the table
-            output_json        # -> updates the JSON
         ]
     )
 
     reset_btn.click(
         fn=reset_ui,
-        inputs=None,  # No inputs needed
         outputs=[
             welcome_block,
-            results_block,
-            input_image,  # -> clears the input image
             output_image,
             output_label,
             output_dataframe,
@@ -436,10 +959,18 @@ with gr.Blocks(theme=dark_theme, title="CXR Quality Assessment") as demo:
         ]
     )
 
 
-# --- Launch the Gradio app ---
 if __name__ == "__main__":
-    # server_name="0.0.0.0" for accessibility on the local network
-    # server_port=7860 is the standard HF Spaces port
-    # auth=("user", "password")  # If you want to add basic authentication locally
-    demo.launch(server_name="0.0.0.0", server_port=7860)  #, share=True)  # Remove share=True for a normal deployment
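For local testing, enabling the commented-out option above would look like this (a sketch, not part of the commit; the credentials are placeholders):

```python
# Hypothetical local launch with basic auth enabled; replace the credentials.
demo.launch(server_name="0.0.0.0", server_port=7860, auth=("user", "password"))
```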
 
 # --- Configuration ---
 MODEL_REPO_ID = "google/cxr-foundation"
+MODEL_DOWNLOAD_DIR = './hf_cxr_foundation_space'  # Directory inside the Space container
+# Thresholds
 SIMILARITY_DIFFERENCE_THRESHOLD = 0.1
 POSITIVE_SIMILARITY_THRESHOLD = 0.1
 print(f"Using thresholds: Comp Δ={SIMILARITY_DIFFERENCE_THRESHOLD}, Simp τ={POSITIVE_SIMILARITY_THRESHOLD}")
 
 # --- Prompts ---
 criteria_list_positive = [
     "optimal centering", "optimal inspiration", "optimal penetration",
     "complete field of view", "scapulae retracted", "sharp image", "artifact free"
 ]
 criteria_list_negative = [
     "poorly centered", "poor inspiration", "non-diagnostic exposure",
     "cropped image", "scapulae overlying lungs", "blurred image", "obscuring artifact"
 ]
 
+# --- Helper functions (integrated or adapted) ---
+# @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])  # May help performance
+def preprocess_text(text):
+    """Internal BERT preprocessor function."""
+    return bert_preprocessor_global(text)  # Assumes bert_preprocessor_global is loaded
 
 def bert_tokenize(text, preprocessor):
+    """Tokenizes text using the globally loaded BERT preprocessor."""
+    if preprocessor is None:
+        raise ValueError("BERT preprocessor is not loaded.")
     if not isinstance(text, str): text = str(text)
+
+    # Run the preprocessor
+    out = preprocessor(tf.constant([text.lower()]))
+
+    # Extract and process the IDs and masks
     ids = out['input_word_ids'].numpy().astype(np.int32)
+    masks = out['input_mask'].numpy().astype(np.float32)
     paddings = 1.0 - masks
+
+    # Replace the [SEP] token (102) with 0 and mark it as padding
+    end_token_idx = (ids == 102)
     ids[end_token_idx] = 0
+    paddings[end_token_idx] = 1.0
+
+    # Ensure the dimensions (B, T, S) -> (1, 1, 128)
+    # The preprocessor may return (1, 128); we need (1, 1, 128)
     if ids.ndim == 2: ids = np.expand_dims(ids, axis=1)
+    if paddings.ndim == 2: paddings = np.expand_dims(paddings, axis=1)
+
+    # Verify the final shapes
+    expected_shape = (1, 1, 128)
     if ids.shape != expected_shape:
+        # Try to readjust if necessary (can happen with some versions)
+        if ids.shape == (1, 128): ids = np.expand_dims(ids, axis=1)
+        else: raise ValueError(f"Wrong shape for ids: {ids.shape}, expected {expected_shape}")
+    if paddings.shape != expected_shape:
+        if paddings.shape == (1, 128): paddings = np.expand_dims(paddings, axis=1)
         else: raise ValueError(f"Wrong shape for paddings: {paddings.shape}, expected {expected_shape}")
+
     return ids, paddings
 
+def png_to_tfexample(image_array: np.ndarray) -> tf.train.Example:
+    """Creates a tf.train.Example from a NumPy array (grayscale)."""
+    if image_array.ndim == 3 and image_array.shape[2] == 1:
+        image_array = np.squeeze(image_array, axis=2)  # Ensure 2D
     elif image_array.ndim != 2:
+        raise ValueError(f'Array must be 2-D (grayscale). Current dimensions: {image_array.ndim}')
+
     image = image_array.astype(np.float32)
+    min_val = image.min()
+    max_val = image.max()
+
+    # Avoid division by zero if the image is constant
+    if max_val <= min_val:  # Constant image
+        # If it is constant, treat it as uint8 if the original range allowed it,
+        # or simply zero it out if it is float.
+        if image_array.dtype == np.uint8 or (min_val >= 0 and max_val <= 255):
+            pixel_array = image.astype(np.uint8)
+            bitdepth = 8
+        else:  # Constant float case, or out of the uint8 range
+            pixel_array = np.zeros_like(image, dtype=np.uint16)
+            bitdepth = 16
     else:
+        image -= min_val  # Shift the minimum to zero
+        current_max = max_val - min_val
+        # Scale to 16-bit for more precision if it was not uint8 originally
         if image_array.dtype != np.uint8:
             image *= 65535 / current_max
+            pixel_array = image.astype(np.uint16)
+            bitdepth = 16
         else:
+            # If it was uint8, keep the range and type;
+            # subtracting the min already left it in [0, current_max].
+            # Scale to 255 if needed
+            image *= 255 / current_max
+            pixel_array = image.astype(np.uint8)
+            bitdepth = 8
+
+    # Encode as PNG
+    output = io.BytesIO()
+    png.Writer(
+        width=pixel_array.shape[1],
+        height=pixel_array.shape[0],
+        greyscale=True,
+        bitdepth=bitdepth
+    ).write(output, pixel_array.tolist())
+    png_bytes = output.getvalue()
+
+    # Create the tf.train.Example
     example = tf.train.Example()
+    features = example.features.feature
+    features['image/encoded'].bytes_list.value.append(png_bytes)
     features['image/format'].bytes_list.value.append(b'png')
     return example
 
+def generate_image_embedding(img_np, elixrc_infer, qformer_infer):
+    """Generates the final image embedding."""
+    if elixrc_infer is None or qformer_infer is None:
+        raise ValueError("ELIXR-C or QFormer models not loaded.")
+
     try:
+        # 1. ELIXR-C
+        serialized_img_tf_example = png_to_tfexample(img_np).SerializeToString()
+        elixrc_output = elixrc_infer(input_example=tf.constant([serialized_img_tf_example]))
         elixrc_embedding = elixrc_output['feature_maps_0'].numpy()
+        print(f"  ELIXR-C embedding shape: {elixrc_embedding.shape}")
+
+        # 2. QFormer (image)
+        qformer_input_img = {
             'image_feature': elixrc_embedding.tolist(),
+            'ids': np.zeros((1, 1, 128), dtype=np.int32).tolist(),        # Empty text
+            'paddings': np.ones((1, 1, 128), dtype=np.float32).tolist(),  # All padding
         }
+        qformer_output_img = qformer_infer(**qformer_input_img)
+        image_embedding = qformer_output_img['all_contrastive_img_emb'].numpy()
+
+        # Adjust dimensions if necessary
+        if image_embedding.ndim > 2:
+            print(f"  Adjusting image embedding dimensions (original: {image_embedding.shape})")
+            image_embedding = np.mean(
+                image_embedding,
+                axis=tuple(range(1, image_embedding.ndim - 1))
+            )
+        elif image_embedding.ndim == 1:
+            image_embedding = np.expand_dims(image_embedding, axis=0)  # Ensure 2D
+
+        print(f"  Final image embedding shape: {image_embedding.shape}")
+        if image_embedding.ndim != 2:
+            raise ValueError(f"Final image embedding does not have 2 dimensions: {image_embedding.shape}")
         return image_embedding
 
+    except Exception as e:
+        print(f"Error generating image embedding: {e}")
+        traceback.print_exc()
+        raise  # Re-raise the exception so Gradio handles it
+
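A sketch of how the two helpers above chain together (illustrative only; assumes the globals loaded further down and a 2-D grayscale array):

```python
import numpy as np

# Hypothetical end-to-end call: PNG-encode a toy image, then embed it.
img = np.array([[0, 128], [64, 255]], dtype=np.uint8)  # toy 2x2 grayscale image
embedding = generate_image_embedding(img, elixrc_infer_global, qformer_infer_global)
# embedding.shape -> (1, D), ready for cosine_similarity against text embeddings
```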
+def calculate_similarities_and_classify(image_embedding, bert_preprocessor, qformer_infer):
+    """Computes similarities and classifies each criterion."""
+    if image_embedding is None: raise ValueError("Image embedding is None.")
+    if bert_preprocessor is None: raise ValueError("BERT preprocessor is None.")
+    if qformer_infer is None: raise ValueError("QFormer is None.")
     detailed_results = {}
+    print("\n--- Computing similarities and classifying ---")
+
     for i in range(len(criteria_list_positive)):
+        positive_text = criteria_list_positive[i]
+        negative_text = criteria_list_negative[i]
+        criterion_name = positive_text  # Use the positive prompt as the key
+
+        print(f"Processing criterion: \"{criterion_name}\"")
+        similarity_positive, similarity_negative, difference = None, None, None
         classification_comp, classification_simp = "ERROR", "ERROR"
+
         try:
+            # 1. Positive text embedding
+            tokens_pos, paddings_pos = bert_tokenize(positive_text, bert_preprocessor)
+            qformer_input_text_pos = {
+                'image_feature': np.zeros([1, 8, 8, 1376], dtype=np.float32).tolist(),  # Dummy
+                'ids': tokens_pos.tolist(), 'paddings': paddings_pos.tolist(),
+            }
+            text_embedding_pos = qformer_infer(**qformer_input_text_pos)['contrastive_txt_emb'].numpy()
+            if text_embedding_pos.ndim == 1: text_embedding_pos = np.expand_dims(text_embedding_pos, axis=0)
+
+            # 2. Negative text embedding
             tokens_neg, paddings_neg = bert_tokenize(negative_text, bert_preprocessor)
+            qformer_input_text_neg = {
+                'image_feature': np.zeros([1, 8, 8, 1376], dtype=np.float32).tolist(),  # Dummy
+                'ids': tokens_neg.tolist(), 'paddings': paddings_neg.tolist(),
+            }
+            text_embedding_neg = qformer_infer(**qformer_input_text_neg)['contrastive_txt_emb'].numpy()
+            if text_embedding_neg.ndim == 1: text_embedding_neg = np.expand_dims(text_embedding_neg, axis=0)
+
+            # Check dimension compatibility for the similarity
+            if image_embedding.shape[1] != text_embedding_pos.shape[1]:
+                raise ValueError(f"Incompatible dimension: Image ({image_embedding.shape[1]}) vs Pos text ({text_embedding_pos.shape[1]})")
+            if image_embedding.shape[1] != text_embedding_neg.shape[1]:
+                raise ValueError(f"Incompatible dimension: Image ({image_embedding.shape[1]}) vs Neg text ({text_embedding_neg.shape[1]})")
+
+            # 3. Compute similarities
+            similarity_positive = cosine_similarity(image_embedding, text_embedding_pos)[0][0]
+            similarity_negative = cosine_similarity(image_embedding, text_embedding_neg)[0][0]
+            print(f"  Sim (+)={similarity_positive:.4f}, Sim (-)={similarity_negative:.4f}")
+
+            # 4. Classify
+            difference = similarity_positive - similarity_negative
+            classification_comp = "PASS" if difference > SIMILARITY_DIFFERENCE_THRESHOLD else "FAIL"
+            classification_simp = "PASS" if similarity_positive > POSITIVE_SIMILARITY_THRESHOLD else "FAIL"
+            print(f"  Diff={difference:.4f} -> Comp: {classification_comp}, Simp: {classification_simp}")
+
+        except Exception as e:
+            print(f"  ERROR processing criterion '{criterion_name}': {e}")
+            traceback.print_exc()
+            # Keep the classifications as "ERROR"
+
+        # Store the results
         detailed_results[criterion_name] = {
+            'positive_prompt': positive_text,
+            'negative_prompt': negative_text,
+            'similarity_positive': float(similarity_positive) if similarity_positive is not None else None,
+            'similarity_negative': float(similarity_negative) if similarity_negative is not None else None,
+            'difference': float(difference) if difference is not None else None,
+            'classification_comparative': classification_comp,
+            'classification_simplified': classification_simp
         }
+    return detailed_results
 
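Each entry of the returned `detailed_results` dictionary has this shape (keys from the code above; the values are made up):

```python
# Hypothetical detailed_results entry, keyed by the positive prompt:
detailed_results = {
    "sharp image": {
        'positive_prompt': 'sharp image',
        'negative_prompt': 'blurred image',
        'similarity_positive': 0.2718,
        'similarity_negative': 0.1034,
        'difference': 0.1684,
        'classification_comparative': 'PASS',
        'classification_simplified': 'PASS',
    }
}
```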
 # --- Global model loading ---
+# Runs ONCE when the Gradio application/Space starts
+print("--- Starting global model loading ---")
 start_time = time.time()
 models_loaded = False
 bert_preprocessor_global = None
+elixrc_infer_global = None
 qformer_infer_global = None
 
+try:
+    # Add a token if needed (for private or gated repos)
+    hf_token = os.environ.get("HF_TOKEN")  # Read the token from the Space secrets
+    if hf_token: print("HF_TOKEN found, using it for authentication.")
+    # HfFolder.save_token(hf_token)  # This does not always work well in serverless environments
+
+    # Create the directory if it does not exist
+    os.makedirs(MODEL_DOWNLOAD_DIR, exist_ok=True)
+    print(f"Downloading/verifying models in: {MODEL_DOWNLOAD_DIR}")
     snapshot_download(repo_id=MODEL_REPO_ID, local_dir=MODEL_DOWNLOAD_DIR,
+                      allow_patterns=['elixr-c-v2-pooled/*', 'pax-elixr-b-text/*'],
+                      local_dir_use_symlinks=False,  # Avoid symlinks
+                      token=hf_token)  # Pass the token here
     print("Models downloaded/verified.")
 
+    # Load the BERT preprocessor from TF Hub
+    print("Loading BERT preprocessor...")
+    # An explicit handle can be more robust in some environments
+    bert_preprocess_handle = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
+    bert_preprocessor_global = tf_hub.KerasLayer(bert_preprocess_handle)
+    print("BERT preprocessor loaded.")
+
+    # Load ELIXR-C
+    print("Loading ELIXR-C...")
+    elixrc_model_path = os.path.join(MODEL_DOWNLOAD_DIR, 'elixr-c-v2-pooled')
+    elixrc_model = tf.saved_model.load(elixrc_model_path)
     elixrc_infer_global = elixrc_model.signatures['serving_default']
+    print("ELIXR-C model loaded.")
 
+    # Load QFormer (ELIXR-B Text)
+    print("Loading QFormer (ELIXR-B Text)...")
+    qformer_model_path = os.path.join(MODEL_DOWNLOAD_DIR, 'pax-elixr-b-text')
+    qformer_model = tf.saved_model.load(qformer_model_path)
     qformer_infer_global = qformer_model.signatures['serving_default']
     print("QFormer model loaded.")
 
     models_loaded = True
+    end_time = time.time()
+    print(f"--- Models loaded globally in {end_time - start_time:.2f} seconds ---")
 
 except Exception as e:
     models_loaded = False
+    print(f"--- CRITICAL ERROR DURING GLOBAL MODEL LOADING ---")
+    print(e)
+    traceback.print_exc()
+    # Gradio will start, but the analysis function will fail.
 
+# --- Main processing function for Gradio ---
+def assess_quality_and_update_ui(image_pil):
+    """Processes the image and returns updates for the UI."""
     if not models_loaded:
+        raise gr.Error("Error: The models could not be loaded. The application cannot process images.")
+    if image_pil is None:
         # Return default/empty values and control visibility
         return (
+            gr.update(visible=True),   # Show welcome
+            gr.update(visible=False),  # Hide results
+            None,                      # Clear output image
             gr.update(value="N/A"),    # Clear label
+            pd.DataFrame(),            # Clear dataframe
             None                       # Clear JSON
         )
 
     print("\n--- Starting evaluation for a new image ---")
+    start_process_time = time.time()
     try:
+        # 1. Convert to NumPy
+        img_np = np.array(image_pil.convert('L'))
+        print(f"Image converted to NumPy. Shape: {img_np.shape}, Type: {img_np.dtype}")
+
+        # 2. Generate the image embedding
+        print("Generating image embedding...")
+        image_embedding = generate_image_embedding(img_np, elixrc_infer_global, qformer_infer_global)
+        print("Image embedding generated.")
+
         # 3. Classify
+        print("Computing similarities and classifying criteria...")
+        detailed_results = calculate_similarities_and_classify(image_embedding, bert_preprocessor_global, qformer_infer_global)
+        print("Classification complete.")
+
+        # 4. Format the results for Gradio
+        output_data = []
+        passed_count = 0
+        total_count = 0
+        for criterion, details in detailed_results.items():
             total_count += 1
+            sim_pos = details['similarity_positive']
+            sim_neg = details['similarity_negative']
+            diff = details['difference']
+            comp = details['classification_comparative']
+            simp = details['classification_simplified']
+            output_data.append([
+                criterion,
+                f"{sim_pos:.4f}" if sim_pos is not None else "N/A",
+                f"{sim_neg:.4f}" if sim_neg is not None else "N/A",
+                f"{diff:.4f}" if diff is not None else "N/A",
+                comp,
+                simp
+            ])
+            if comp == "PASS":
+                passed_count += 1
+
+        # Build the DataFrame
+        df_results = pd.DataFrame(output_data, columns=[
+            "Criterion", "Sim (+)", "Sim (-)", "Difference", "Assessment (Comp)", "Assessment (Simp)"
+        ])
+
+        # Compute the overall quality label
+        overall_quality = "Error"
+        pass_rate = 0
+        if total_count > 0:
+            pass_rate = passed_count / total_count
+            if pass_rate >= 0.85: overall_quality = "Excellent"
+            elif pass_rate >= 0.70: overall_quality = "Good"
+            elif pass_rate >= 0.50: overall_quality = "Fair"
+            else: overall_quality = "Poor"
+        quality_label = f"{overall_quality} ({passed_count}/{total_count} passed)"
+
+        end_process_time = time.time()
+        print(f"--- Evaluation completed in {end_process_time - start_process_time:.2f} seconds ---")
+
+        # Return the results and update visibility
+        return (
+            gr.update(visible=False),        # Hide welcome
             gr.update(visible=True),         # Show results
+            image_pil,                       # Show the processed image
+            gr.update(value=quality_label),  # Update the label
+            df_results,                      # Update the dataframe
+            detailed_results                 # Update the JSON
+        )
+    except Exception as e:
+        print(f"Error during Gradio processing: {e}")
+        traceback.print_exc()
+        # Raise a gr.Error to show it in the Gradio UI
+        raise gr.Error(f"Error processing image: {str(e)}")
 
+# --- UI reset function ---
+def reset_ui():
     print("Resetting UI...")
     return (
+        gr.update(visible=True),   # Show welcome
         gr.update(visible=False),  # Hide results
+        None,                      # Clear input image
         None,                      # Clear output image
         gr.update(value="N/A"),    # Clear label
+        pd.DataFrame(),            # Clear dataframe
         None                       # Clear JSON
     )
 
  # --- Definir Tema Oscuro Personalizado ---
663
+ # Inspirado en los colores del HTML original y pass_rate = passed_count / total_count
664
+ if pass Tailwind dark grays/blues
665
+ dark_theme = gr.themes.Default_rate >= 0.85: overall_quality = "Excellent"
666
+ elif pass_rate >=(
667
  primary_hue=gr.themes.colors.blue, # Azul como color primario
668
+ secondary_hue=gr.themes.colors.blue, 0.70: overall_quality = "Good"
669
+ elif # Azul secundario
670
+ neutral_hue=gr.themes.colors pass_rate >= 0.50: overall_quality = "Fair.gray, # Gris neutro
671
+ font=[gr.themes.GoogleFont("Inter"
672
+ else: overall_quality = "Poor"
673
+ quality_"), "ui-sans-serif", "system-ui", "sans-label = f"{overall_quality} ({passed_count}/{total_countserif"],
674
+ font_mono=[gr.themes.GoogleFont("Jet} passed)"
675
+
676
+ end_process_time = time.time()
677
+ print(f"---Brains Mono"), "ui-monospace", "Consolas", "monospace"],
678
+ Evaluación completada en {end_process_time - start_process_time:.2f} segundos ---).set(
679
  # Fondos
680
  body_background_fill="#111827", # Fondo principal muy oscuro (gray-900)
681
+ background_fill_primary="#1f2937",")
682
+
683
+ # Devolver resultados y actualizar visibilidad
684
+ return (
685
+ # Fondo de componentes (gray-800)
686
+ background_fill_secondary="#3gr.update(visible=False), # Oculta bienvenida
687
+ gr.update(visible=74151", # Fondo secundario (gray-700)
688
+ block_background_fill="#1f2937", True), # Muestra resultados
689
+ image_pil, # Muestra imagen# Fondo de bloques (gray-800)
690
 
691
  # Texto
692
+ procesada
693
+ gr.update(value=quality_label), # Actualiza etiqueta
694
+ df body_text_color="#d1d5db", # Texto_results, # Actualiza dataframe
695
+ detailed_results # Actualiza JSON
696
+ )
697
+ except Exception as e:
698
+ print(f"Error durante principal claro (gray-300)
699
+ # text_color_subdued="# procesamiento Gradio: {e}")
700
+ traceback.print_exc()
701
+ 9ca3af", # <-- LÍNEA PROBLEMÁTICA EL# Lanzar un gr.Error para mostrarlo en la UI de Gradio
702
+ raise gr.Error(f"Error procesando imagen: {str(e)}")
703
+
704
+
705
+ # --- Función para ResetearIMINADA
706
  block_label_text_color="#d1d5db", # Etiquetas de bloque (gray-300)
707
+ block_title_text la UI ---
708
+ def reset_ui():
709
+ print("Reseteando UI...")
710
+ return (
711
+ gr.update(visible=True), # Muestra bienvenida
712
+ _color="#ffffff", # Títulos de bloque (blanco)
713
 
714
+ gr.update(visible=False), # Oculta resultados
715
+ # Bordes
716
+ border_color_accent="#374151",None, # Borra imagen de entrada
717
+ None, # Bor # Borde (gray-700)
718
+ border_colorra imagen de salida
719
+ gr.update(value="N/A"), # Borra etiqueta
720
+ _primary="#4b5563", # Borde primario (gray-pd.DataFrame(), # Borra dataframe
721
+ None # Borra JSON
722
+ )
723
+
724
+ 600)
725
 
726
+ # --- Define Custom Dark Theme (FIXED) ---
+ # Inspired by the original HTML's colors and Tailwind dark grays/blues
+ dark_theme = gr.themes.Default(
+     primary_hue=gr.themes.colors.blue,    # Blue as primary color
+     secondary_hue=gr.themes.colors.blue,  # Secondary blue
+     neutral_hue=gr.themes.colors.gray,    # Neutral gray
+     font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
+     font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "ui-monospace", "Consolas", "monospace"],
+ ).set(
+     # Backgrounds
+     body_background_fill="#111827",       # Very dark main background (gray-900)
+     background_fill_primary="#1f2937",    # Component background (gray-800)
+     background_fill_secondary="#374151",  # Secondary background (gray-700)
+     block_background_fill="#1f2937",      # Block background (gray-800)
+ 
+     # Text
+     body_text_color="#d1d5db",            # Light main text (gray-300)
+     # text_color_subdued="#9ca3af",       # <-- THIS LINE CAUSED THE ERROR AND WAS REMOVED/COMMENTED OUT
+     block_label_text_color="#d1d5db",     # Block labels (gray-300)
+     block_title_text_color="#ffffff",     # Block titles (white)
+ 
+     # Borders
+     border_color_accent="#374151",        # Border (gray-700)
+     border_color_primary="#4b5563",       # Primary border (gray-600)
+ 
+     # Buttons and Interactive Elements
+     button_primary_background_fill="*primary_600",  # Use primary color (blue)
+     button_primary_text_color="#ffffff",
+     button_secondary_background_fill="*neutral_700",
+     button_secondary_text_color="#ffffff",
+     input_background_fill="#374151",      # Input background (gray-700)
+     input_border_color="#4b5563",         # Input border (gray-600)
+     input_text_color="#ffffff",           # Text in inputs
+ 
+     # Shadows and Radii
+     shadow_drop="rgba(0,0,0,0.2) 0px 2px 4px",
+     block_shadow="rgba(0,0,0,0.2) 0px 2px 5px",
+     radius_size="*radius_lg",             # Rounded corners
+ )
+ 
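+ # A quick way to eyeball the palette before wiring the full UI: a minimal,
+ # hypothetical sketch, not part of the app flow.
+ #
+ #     with gr.Blocks(theme=dark_theme) as theme_preview:
+ #         gr.Button("Primary", variant="primary")
+ #         gr.Button("Secondary", variant="secondary")
+ #         gr.Textbox(label="Sample input")
+ #     theme_preview.launch()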
+ 
+ # --- Define the Gradio Interface with Blocks and Theme ---
+ with gr.Blocks(theme=dark_theme, title="CXR Quality Assessment") as demo:
+     # --- Header ---
+     with gr.Row():
+         gr.Markdown(
+             """
+             # <span style="color: #e5e7eb;">CXR Quality Assessment</span>
+             <p style="color: #9ca3af;">Evaluate chest X-ray technical quality using AI (ELIXR family)</p>
+             """,  # Use white/light gray for the header text
+             elem_id="app-header"
+         )
+ 
+     # --- Main Content (Two Columns) ---
+     with gr.Row(equal_height=False):  # Allow different heights
+ 
+         # --- Left Column (Upload) ---
+         with gr.Column(scale=1, min_width=350):
+             gr.Markdown("### 1. Upload Image", elem_id="upload-title")
+             input_image = gr.Image(type="pil", label="Upload Chest X-ray", height=300)  # Fixed height for the input image
+             with gr.Row():
+                 analyze_btn = gr.Button("Analyze Image", variant="primary", scale=2)
+                 reset_btn = gr.Button("Reset", variant="secondary", scale=1)
+             # Add examples here if you have sample images
+             # gr.Examples(
+             #     examples=[os.path.join("examples", "sample_cxr.png")],
+             #     inputs=input_image, label="Example CXR"
+             # )
+             gr.Markdown(
+                 "<p style='color:#9ca3af; font-size:0.9em;'>Model loading on startup takes ~1 min. Analysis takes ~15-40 sec.</p>"
+             )
+ 
+         # --- Right Column (Welcome / Results) ---
+         with gr.Column(scale=2):
+ 
+             # --- Welcome Block (Visible Initially) ---
+             with gr.Column(visible=True, elem_id="welcome-section") as welcome_block:
+                 gr.Markdown(
+                     """
+                     ### Welcome!
+                     Upload a chest X-ray image (PNG, JPG, etc.) on the left panel and click "Analyze Image".
+ 
+                     The system will evaluate its technical quality based on 7 standard criteria using the ELIXR model family.
+                     The results will appear here once the analysis is complete.
+                     """,
+                     elem_id="welcome-text"
+                 )
+                 # You could add an icon or image here if you like
+                 # gr.Image("path/to/welcome_icon.png", interactive=False, show_label=False, show_download_button=False)
+ 
+             # --- Results Block (Hidden Initially) ---
+             with gr.Column(visible=False, elem_id="results-section") as results_block:
+                 gr.Markdown("### 2. Quality Assessment Results", elem_id="results-title")
+                 with gr.Row():  # Row for the output image and the summary
+                     with gr.Column(scale=1):
+                         output_image = gr.Image(type="pil", label="Analyzed Image", interactive=False)
+                     with gr.Column(scale=1):
+                         gr.Markdown("#### Summary", elem_id="summary-title")
+                         output_label = gr.Label(value="N/A", label="Overall Quality Estimate", elem_id="quality-label")
+                         # We could add more summary text here if we wanted
+ 
+                 gr.Markdown("#### Detailed Criteria Evaluation", elem_id="detailed-title")
+                 output_dataframe = gr.DataFrame(
+                     headers=["Criterion", "Sim (+)", "Sim (-)", "Difference", "Assessment (Comp)", "Assessment (Simp)"],
+                     label=None,  # Drop the redundant label
+                     wrap=True,
+                     # Height is now handled better automatically or via CSS
+                     # row_count=(7, "dynamic"),  # Show 7 rows, allow scrolling for more
+                     max_rows=10,  # Limit visible rows, scroll for the rest
+                     overflow_row_behaviour="show_ends",  # Show start/end while scrolling
+                     interactive=False,  # Not editable
+                     elem_id="results-dataframe"
+                 )
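+                 # Whatever builds df_results upstream must yield exactly these six
+                 # columns. A minimal sketch under that assumption (names like crit,
+                 # sp, sn, per_criterion_scores are illustrative, not defined here):
+                 #
+                 #     rows = [
+                 #         [crit, f"{sp:.4f}", f"{sn:.4f}", f"{sp - sn:.4f}",
+                 #          "PASS" if sp - sn > SIMILARITY_DIFFERENCE_THRESHOLD else "FAIL",
+                 #          "PASS" if sp > POSITIVE_SIMILARITY_THRESHOLD else "FAIL"]
+                 #         for crit, sp, sn in per_criterion_scores
+                 #     ]
+                 #     df_results = pd.DataFrame(rows, columns=["Criterion", "Sim (+)",
+                 #         "Sim (-)", "Difference", "Assessment (Comp)", "Assessment (Simp)"])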
+ 
+                 with gr.Accordion("Raw JSON Output (for debugging)", open=False):
+                     output_json = gr.JSON(label=None)
+ 
+                 gr.Markdown(
+                     f"""
+                     #### Technical Notes
+                     * **Criterion:** Quality aspect evaluated.
+                     * **Sim (+/-):** Cosine similarity with positive/negative prompt.
+                     * **Difference:** Sim (+) - Sim (-).
+                     * **Assessment (Comp):** PASS if Difference > {SIMILARITY_DIFFERENCE_THRESHOLD}. (Main Result)
+                     * **Assessment (Simp):** PASS if Sim (+) > {POSITIVE_SIMILARITY_THRESHOLD}.
+                     """, elem_id="notes-text"
+                 )
+ 
+     # --- Footer ---
+     gr.Markdown(
+         """
+         ----
+         <p style='text-align:center; color:#9ca3af; font-size:0.8em;'>
+         CXR Quality Assessment Tool | Model: google/cxr-foundation | Interface: Gradio
+         </p>
+         """, elem_id="app-footer"
+     )
+ 
+     # --- Event Wiring ---
+     analyze_btn.click(
+         fn=assess_quality_and_update_ui,
+         inputs=[input_image],
+         outputs=[
+             welcome_block,     # -> updates welcome visibility
+             results_block,     # -> updates results visibility
+             output_image,      # -> shows the analyzed image
+             output_label,      # -> updates the summary label
+             output_dataframe,  # -> updates the table
+             output_json        # -> updates the JSON
+         ]
+     )
+ 
+     reset_btn.click(
+         fn=reset_ui,
+         inputs=None,  # Needs no inputs
+         outputs=[
+             welcome_block,
+             results_block,
+             input_image,       # -> clears the input image
+             output_image,
+             output_label,
+             output_dataframe,
+             output_json        # -> clears the JSON (matches reset_ui's 7 return values)
+         ]
+     )
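+     # With analyses taking ~15-40 s, enabling the request queue is worth
+     # considering; a hypothetical call for Gradio 3.x (check the version
+     # pinned for this Space before relying on the signature):
+     # demo.queue(concurrency_count=1)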
 
+ 
+ # --- Launch the Gradio App ---
+ if __name__ == "__main__":
+     # server_name="0.0.0.0" for accessibility on the local network
+     # server_port=7860 is the standard HF Spaces port
+     demo.launch(server_name="0.0.0.0", server_port=7860)
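+ # Typical local run, assuming the Space's requirements are installed:
+ #   pip install -r requirements.txt
+ #   python app.py
+ # On HF Spaces the Gradio SDK runtime starts app.py automatically.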