SushantGautam commited on
Commit
0b9f901
·
1 Parent(s): ca59e56

Update feature extraction to use last_hidden_state in submission_task2.py

Browse files
medvqa/competitions/gi-2025/task_2.py CHANGED
@@ -24,7 +24,7 @@ os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
24
  submission_file = "submission_task2.py"
25
  file_from_validation = "predictions_2.json"
26
 
27
- min_library = ["datasets>=3.4.1", "transformers", "evaluate", "scipy", "scikit-learn",
28
  "rouge_score", 'tqdm', "gradio_client>=1.8.0"]
29
 
30
  print("🌟 ImageCLEFmed-MEDVQA-GI-2025 🌟",
 
24
  submission_file = "submission_task2.py"
25
  file_from_validation = "predictions_2.json"
26
 
27
+ min_library = ["datasets>=3.4.1", "transformers", "evaluate", "scipy", "scikit-learn", "diffusers", "peft",
28
  "rouge_score", 'tqdm', "gradio_client>=1.8.0"]
29
 
30
  print("🌟 ImageCLEFmed-MEDVQA-GI-2025 🌟",
medvqa/submission_samples/gi-2025/submission_task2.py CHANGED
@@ -122,7 +122,7 @@ modelx.eval()
122
  def extract_features(batch):
123
  inputs = processor(images=batch['image'], return_tensors="pt").to(DEVICE)
124
  with torch.no_grad():
125
- feats = modelx(**inputs).pooler_output
126
  feats = feats / feats.norm(p=2, dim=-1, keepdim=True)
127
  return {'features': feats.cpu().numpy()}
128
 
 
122
  def extract_features(batch):
123
  inputs = processor(images=batch['image'], return_tensors="pt").to(DEVICE)
124
  with torch.no_grad():
125
+ feats = modelx(**inputs).last_hidden_state[:, 0, :]
126
  feats = feats / feats.norm(p=2, dim=-1, keepdim=True)
127
  return {'features': feats.cpu().numpy()}
128