import os

# Install YOLOP's dependencies before torch.hub.load fetches the model.
# Note the raw-file URL: pip cannot read a requirements file from a GitHub "blob" page.
os.system("pip install -qr https://raw.githubusercontent.com/hustvl/YOLOP/main/requirements.txt")

import gradio as gr
import torch
from torchvision import transforms
# load the pretrained YOLOP model from torch hub and switch to inference mode
model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)
model.eval()
# ImageNet normalization (currently unused; uncomment it in the Compose below to apply)
normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)

transform = transforms.Compose([
    transforms.ToTensor(),
    # normalize,
])
def inference(img):
    # YOLOP expects a 640x640 input
    img = img.resize((640, 640))
    # add a batch dimension: (1, 3, 640, 640)
    img = torch.unsqueeze(transform(img), dim=0)
    with torch.no_grad():
        det_out, da_seg_out, ll_seg_out = model(img)
    # channel 0 of each segmentation head, with the batch dimension dropped
    da_out = da_seg_out[0][0, :, :].numpy()
    ll_out = ll_seg_out[0][0, :, :].numpy()
    return da_out, ll_out
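
# Optional post-processing sketch: the raw score maps returned above can be turned
# into hard 0/1 masks by taking an argmax over the two segmentation channels, the
# approach YOLOP's own demo uses. `to_mask` is an illustrative helper name and
# assumes a (1, 2, H, W) output tensor from either segmentation head.
def to_mask(seg_out):
    # argmax over the class channel -> (H, W) float array of 0.0 / 1.0
    return seg_out[0].argmax(dim=0).float().numpy()
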
# Gradio UI: one PIL image in, the two segmentation maps out
gr.Interface(fn=inference, inputs=gr.Image(type="pil"), outputs=["image", "image"]).launch(debug=True)
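# Usage: running `python app.py` starts the demo on a local Gradio server;
# debug=True blocks the process and prints errors raised inside inference()
# to the console, which is helpful while iterating on the Space.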