Inoob committed (verified)
Commit b1af8c9 · Parent(s): b32a0cf

Update app.py

Files changed (1): app.py (+35 -34)
app.py CHANGED

@@ -77,40 +77,41 @@ for x in samples:
     # For demonstration purposes, we'll reverse the input as the model output
     # Replace this part with your model's actual output
     model_output_full = st.text_input("Model Output:", "")
-
-    # Extract the text between <back> and </back> tags
-    tag1 = model_output_full.find("<back>")
-    tag2 = model_output_full.find("</back>")
-    model_output = model_output_full[tag1 + 6: tag2]
-    st.subheader("Model Output")
-    st.write(model_output)
-
-    # Tokenize both outputs for BLEU calculation
-    reference_tokens = nltk.word_tokenize(true_output)
-    candidate_tokens = nltk.word_tokenize(model_output)
-
-    # Compute BLEU score (using the single reference)
-    bleu_score = sentence_bleu([reference_tokens], candidate_tokens)
-    st.write("**BLEU Score:**", bleu_score)
-
-    # Compute ROUGE scores
-    rouge_scores = rouge.get_scores(model_output, true_output)
-    st.write("**ROUGE Scores:**")
-    st.json(rouge_scores)
-
-    # Compute character-level accuracy and precision
-    accuracy_metric = char_accuracy(true_output, model_output)
-    precision_metric = char_precision(true_output, model_output)
-    st.write("**Character Accuracy:**", accuracy_metric)
-    st.write("**Character Precision:**", precision_metric)
-
-    st.markdown("---")
-
-    # Append metrics to lists
-    acc.append(accuracy_metric)
-    pres.append(precision_metric)
-    bleu.append(bleu_score)
-    rouges.append(rouge_scores)
+    if st.button("Submit"):
+
+        # Extract the text between <back> and </back> tags
+        tag1 = model_output_full.find("<back>")
+        tag2 = model_output_full.find("</back>")
+        model_output = model_output_full[tag1 + 6: tag2]
+        st.subheader("Model Output")
+        st.write(model_output)
+
+        # Tokenize both outputs for BLEU calculation
+        reference_tokens = nltk.word_tokenize(true_output)
+        candidate_tokens = nltk.word_tokenize(model_output)
+
+        # Compute BLEU score (using the single reference)
+        bleu_score = sentence_bleu([reference_tokens], candidate_tokens)
+        st.write("**BLEU Score:**", bleu_score)
+
+        # Compute ROUGE scores
+        rouge_scores = rouge.get_scores(model_output, true_output)
+        st.write("**ROUGE Scores:**")
+        st.json(rouge_scores)
+
+        # Compute character-level accuracy and precision
+        accuracy_metric = char_accuracy(true_output, model_output)
+        precision_metric = char_precision(true_output, model_output)
+        st.write("**Character Accuracy:**", accuracy_metric)
+        st.write("**Character Precision:**", precision_metric)
+
+        st.markdown("---")
+
+        # Append metrics to lists
+        acc.append(accuracy_metric)
+        pres.append(precision_metric)
+        bleu.append(bleu_score)
+        rouges.append(rouge_scores)

 # Allow the user to download the metrics
 if st.button("Download Metrics"):
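
Note: the hunk relies on names defined earlier in app.py that the diff does not show, namely the nltk/rouge setup, the running metric lists, and the char_accuracy / char_precision helpers. A minimal sketch of what those definitions might look like follows, assuming the standard nltk and rouge PyPI packages; the two helper implementations are hypothetical reconstructions and only illustrate one plausible character-level definition.

import nltk
from nltk.translate.bleu_score import sentence_bleu
from rouge import Rouge
from collections import Counter

nltk.download("punkt")  # nltk.word_tokenize needs the punkt tokenizer models
rouge = Rouge()         # provides rouge.get_scores(hypothesis, reference)

acc, pres, bleu, rouges = [], [], [], []  # running per-sample metric lists

def char_accuracy(reference: str, hypothesis: str) -> float:
    # Hypothetical: fraction of aligned positions whose characters match,
    # normalized by the longer string so length mismatches are penalized.
    if not reference and not hypothesis:
        return 1.0
    matches = sum(r == h for r, h in zip(reference, hypothesis))
    return matches / max(len(reference), len(hypothesis))

def char_precision(reference: str, hypothesis: str) -> float:
    # Hypothetical: share of hypothesis characters that also occur in the
    # reference, counted with multiplicity.
    if not hypothesis:
        return 0.0
    ref_counts = Counter(reference)
    overlap = sum(min(n, ref_counts[c]) for c, n in Counter(hypothesis).items())
    return overlap / len(hypothesis)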