|
import os |
|
from PIL import Image |
|
import torch |
|
from torchvision import transforms |
|
|
|
# Install YOLOP's Python dependencies before loading the model from the hub.
# BUG FIX: the original URL pointed at the GitHub "blob" page, which serves an
# HTML document — pip needs the raw file from raw.githubusercontent.com.
os.system(
    "pip install -qr"
    " https://raw.githubusercontent.com/hustvl/YOLOP/main/requirements.txt"
)
|
|
|
|
|
# Load the pretrained YOLOP model (joint object detection + drivable-area and
# lane-line segmentation) from PyTorch Hub; downloads weights on first run.
model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)
|
|
|
|
|
# ImageNet mean/std — the normalization the YOLOP pretrained weights expect
# (matches the official YOLOP PyTorch Hub usage example).
normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)

# Preprocessing: PIL image -> float tensor in [0, 1] -> ImageNet-normalized.
# BUG FIX: `normalize` was defined but never added to the pipeline (note the
# empty slot left after ToTensor() in the original), so the model received
# un-normalized input.
transform = transforms.Compose([
    transforms.ToTensor(),
    normalize,
])
|
|
|
|
|
def inference(img):
    """Run YOLOP on a road-scene image and return its two segmentation maps.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image of any size; it is resized to 640x640 before inference.

    Returns
    -------
    tuple of numpy.ndarray
        ``(da_out, ll_out)`` — channel 0 of the drivable-area and lane-line
        segmentation heads respectively, each a 640x640 float array.
        NOTE(review): assumes channel 0 is the map to display — confirm
        against the YOLOP head layout (channel 0 may be background).
    """
    # YOLOP expects a fixed 640x640 input.
    img = img.resize((640, 640))

    # Preprocess and add the batch dimension: (C, H, W) -> (1, C, H, W).
    batch = torch.unsqueeze(transform(img), dim=0)

    # Pure inference — no_grad() skips building the autograd graph, saving
    # memory and time (the original tracked gradients and relied on detach()).
    with torch.no_grad():
        det_out, da_seg_out, ll_seg_out = model(batch)

    # Extract channel 0 of each segmentation head as a numpy array.
    ll_out = ll_seg_out[0][0, :, :].detach().numpy()
    da_out = da_seg_out[0][0, :, :].detach().numpy()
    return da_out, ll_out
|
|
|
# BUG FIX: `gr` was used but never imported — the script raised NameError
# before launching. gradio is installed by the requirements step above.
import gradio as gr

# Web demo: one PIL image in, two images out (drivable area, lane lines).
# NOTE(review): `gr.inputs.Image` is the legacy (gradio < 4) input API; on
# modern gradio replace it with `gr.Image(type="pil")`.
gr.Interface(inference, gr.inputs.Image(type="pil"), ["image", "image"]).launch(debug=True)