Spaces: Runtime error
Commit 516fe27 · Parent(s): 42bb2a8
Update app.py
app.py CHANGED
@@ -6,22 +6,13 @@ import IPython.display
 from PIL import Image
 import base64
 import torch
+from transformers import pipeline
 
 
-from transformers import pipeline
 
-
+completion_obj = pipeline("image-to-text",model="Salesforce/blip-image-captioning-base")
 
-def predict(text):
-    return pipe(text)[0]["translation_text"]
-
-demo = gr.Interface(
-    fn=predict,
-    inputs='text',
-    outputs='text',
-)
 
-demo.launch()
 
 
 #def greet(name):
@@ -37,8 +28,13 @@ demo.launch()
 #gr.Textbox(os.environ['HF_TOKENS'])
 
 #Image-to-text endpoint
-
-
+def get_completion(inputs):
+
+    output = get_completion(input)
+    return output[0]['generated_text']
+
+
+# headers = {
 #   "Authorization": f"Bearer {os.environ['HF_TOKENS']}",
 #   "Content-Type": "application/json"
 # }
@@ -72,13 +68,13 @@ def captioner(image):
     result = get_completion(base64_image)
     return result[0]['generated_text']
 
-
-
-
-
-
-
-
+gr.close_all()
+demo = gr.Interface(fn=captioner,
+                    inputs=[gr.Image(label="Upload image", type="pil")],
+                    outputs=[gr.Textbox(label="Caption")],
+                    title="Image Captioning with BLIP",
+                    description="Caption any image using the BLIP model",
+                    allow_flagging="never")
 
 
 
@@ -90,4 +86,4 @@ def captioner(image):
 # // allow_flagging="never",
 # // examples=["christmas_dog.jpeg", "bird_flight.jpeg", "cow.jpeg"])
 
-
+demo.launch()
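
Note on the Runtime error badge: as committed, get_completion(inputs) calls itself (output = get_completion(input), also referencing the builtin input rather than the inputs argument) and never uses the completion_obj pipeline loaded at the top of the file, so the first request recurses until the Space crashes. The sketch below is not part of this commit; it shows one plausible way to wire the same pieces so the BLIP pipeline is actually called. It assumes "import gradio as gr" already appears earlier in app.py (outside this diff), and it passes the PIL image from gr.Image(type="pil") straight to the local pipeline instead of the base64_image string used by captioner in the committed code, since a local transformers pipeline does not need the base64 step used for hosted inference endpoints.

# Sketch only, not the committed code: one way app.py could call the BLIP pipeline.
import gradio as gr
from transformers import pipeline

# Same checkpoint the commit loads; weights are downloaded on first use.
completion_obj = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

def get_completion(inputs):
    # Call the pipeline object instead of recursing; inputs is a PIL image here.
    return completion_obj(inputs)

def captioner(image):
    # gr.Image(type="pil") delivers a PIL image, which the pipeline accepts directly,
    # so no base64 encoding is required for a local pipeline.
    result = get_completion(image)
    return result[0]['generated_text']

gr.close_all()
demo = gr.Interface(fn=captioner,
                    inputs=[gr.Image(label="Upload image", type="pil")],
                    outputs=[gr.Textbox(label="Caption")],
                    title="Image Captioning with BLIP",
                    description="Caption any image using the BLIP model",
                    allow_flagging="never")

demo.launch()

With this wiring, uploading an image returns the BLIP caption in the Textbox; the interface settings (title, labels, allow_flagging) are kept exactly as in the commit.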