FuriouslyAsleep committed
Commit 3ccf190 · 1 Parent(s): 9ae66cd

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -83,8 +83,8 @@ def greet(htmlString,userQuestion):
 
     question = userQuestion
     single_html_string = htmlString
-    #single_html_string = "<h1>Grid Fin</h1><table><tr><td><p>Cost Risk</p></td><td><p>None</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table><h1>Propulsion</h1><table><tr><td><p>Cost Risk</p></td><td><p>Yes, material overrun</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table><h1>Nose Cone</h1><table><tr><td><p>Cost Risk</p></td><td><p>None</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table>"
-    inputs = tokenizer.encode_plus(question, single_html_string, return_tensors="pt", padding="max_length", max_length=512, truncation=True)
+    #single_html_string = "<h1>Grid Fin</h1><table><tr><td><p>Cost Risk</p></td><td><p>None</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table><h1>Propulsion</h1><table><tr><td><p>Cost Risk</p></td><td><p>Yes, material overrun</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table><h1>Nose Cone</h1><table><tr><td><p>Cost Risk</p></td><td><p>Labor rate increases</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table>"
+    inputs = tokenizer.encode_plus(question, single_html_string, return_tensors="pt", padding="max_length", max_length=10, truncation=True)
 
     answer_start_scores, answer_end_scores = model(**inputs, return_dict=False)
     answer_start = torch.argmax(answer_start_scores)  # get the most likely beginning of answer with the argmax of the score
@@ -114,7 +114,7 @@ iface = gr.Interface(
     greet,
     [
         gr.inputs.Textbox(
-            lines=3, default="<h1>Grid Fin</h1><table><tr><td><p>Cost Risk</p></td><td><p>None</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table><h1>Propulsion</h1><table><tr><td><p>Cost Risk</p></td><td><p>Yes, material overrun</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table><h1>Nose Cone</h1><table><tr><td><p>Cost Risk</p></td><td><p>None</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table>"
+            lines=3, default="<h1>Grid Fin</h1><table><tr><td><p>Cost Risk</p></td><td><p>None</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table><h1>Propulsion</h1><table><tr><td><p>Cost Risk</p></td><td><p>Yes, material overrun</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table><h1>Nose Cone</h1><table><tr><td><p>Cost Risk</p></td><td><p>Labor rate increases</p></td></tr><tr><td><p>Schedule Risk</p></td><td><p>None</p></td></tr></table>"
         ),
         gr.inputs.Textbox(lines=3, default="What is the propulsion risk?"),
     ],