mike23415 committed on
Commit
9fd7d89
·
verified ·
1 Parent(s): 589342e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -12
app.py CHANGED
@@ -1,16 +1,17 @@
import torch
from transformers import pipeline

# Force CPU inference: device=-1 tells the pipeline not to use CUDA.
summarizer = pipeline("summarization", model="t5-base", device=-1)

text = "This is a long text that needs summarization."

# Dynamically size max_length from the input so T5 does not pad short
# inputs to a fixed 50 tokens. Clamp from below as well: transformers'
# generate() raises if max_length < min_length, which the original
# min(50, 0.8 * words) could produce for inputs of ~6 words or fewer.
MIN_LENGTH = 5
input_length = len(text.split())  # rough whitespace-token count, not true subword tokens
max_length = max(MIN_LENGTH, min(50, int(input_length * 0.8)))

summary = summarizer(text, max_length=max_length, min_length=MIN_LENGTH, do_sample=False)
print(summary[0]["summary_text"])
import os

# Hugging Face configuration. These MUST be set before importing
# transformers: huggingface_hub reads HF_HOME and the HF_HUB_* flags at
# import time, so assigning them after the import (as the previous
# version did) has no effect.
os.environ["HF_HOME"] = "/app/cache"  # writable cache dir inside the container
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"  # silence warning on filesystems without symlinks
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"  # opt out of usage telemetry
os.environ["HF_HUB_OFFLINE"] = "0"  # allow downloading the model from the Hub

from transformers import pipeline

# Load the summarization model once at import time so the pipeline is
# ready before any work begins (downloads t5-base on first run).
summarizer = pipeline("summarization", model="t5-base")

if __name__ == "__main__":
    print("Application started successfully!")
    result = summarizer(
        "This is a long text that needs summarization.",
        max_length=50,
        min_length=10,
        do_sample=False,
    )
    print(result)