gleisonnanet committed
Commit ba788e4 · 1 Parent(s): a5fb092

downgrade python

Files changed (2)
  1. Dockerfile +2 -2
  2. main.py +2 -4
Dockerfile CHANGED
@@ -1,5 +1,5 @@
-FROM python:3.9
-
+# FROM python:3.9
+FROM python:3.7-slim-buster
 
 WORKDIR /code
 
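The commit only swaps the pinned base image; nothing in the code enforces the interpreter version at runtime. A minimal sketch of a startup guard that could sit at the top of main.py (hypothetical, not part of this commit), failing fast if the container is ever rebuilt from a different base:

import sys

# The Dockerfile pins python:3.7-slim-buster; refuse to start under any
# other minor version so an accidental base-image change is caught early.
if sys.version_info[:2] != (3, 7):
    raise RuntimeError(f"expected Python 3.7, running {sys.version.split()[0]}")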
main.py CHANGED
@@ -8,7 +8,7 @@ from pydantic import BaseModel
 from enum import Enum
 from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration, AutoTokenizer, AutoModelForSeq2SeqLM
 import torch
-
+import uvicorn
 app = FastAPI(docs_url="/")
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
@@ -67,7 +67,7 @@ tokenizer = AutoTokenizer.from_pretrained(chat_model_name)
 modelchat = AutoModelForSeq2SeqLM.from_pretrained(chat_model_name)
 
 @app.get("/chat")
-async def read_root(text: str, ):
+async def read_root(text: str):
     input_ids = tokenizer(
         [WHITESPACE_HANDLER(text)],
         return_tensors="pt",
@@ -98,8 +98,6 @@ async def read_root(text: str, ):
 
 
 if __name__ == "__main__":
-    import uvicorn
-
     uvicorn.run(app, host="0.0.0.0", port=7860)
 
 
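With uvicorn now imported at module level and the stray trailing comma removed from the /chat signature, the endpoint takes a single required text query parameter. A minimal smoke-test sketch for the result, assuming main.py is importable as main and that the handler returns JSON (both inferred from the diff, not shown in it):

from fastapi.testclient import TestClient

from main import app  # the FastAPI instance created with FastAPI(docs_url="/")

client = TestClient(app)

# /chat takes its input as a required "text" query parameter.
resp = client.get("/chat", params={"text": "Hello, how are you?"})
assert resp.status_code == 200
print(resp.json())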