import copy
from typing import Union
import dspy
# from storm_wiki.modules.storm_dataclass import StormArticle
import concurrent.futures
import json
import os
import pickle
import re
import sys
from typing import List, Dict
import httpx
import toml
from langchain_text_splitters import RecursiveCharacterTextSplitter
from trafilatura import extract


class ArticleTextProcessing:
    @staticmethod
    def limit_word_count_preserve_newline(input_string, max_word_count):
        """
        Limit the word count of an input string to a specified maximum while preserving newlines.

        The function truncates the input string at the nearest word that does not exceed the
        maximum word count. Words are defined as text separated by spaces, and lines are defined
        as text separated by newline characters.

        Args:
            input_string (str): The string to be truncated. This string may contain multiple lines.
            max_word_count (int): The maximum number of words allowed in the truncated string.

        Returns:
            str: The truncated string with word count limited to `max_word_count`.
        """
        word_count = 0
        limited_string = ''

        for line in input_string.split('\n'):
            line_words = line.split()
            for lw in line_words:
                if word_count < max_word_count:
                    limited_string += lw + ' '
                    word_count += 1
                else:
                    break
            if word_count >= max_word_count:
                break
            limited_string = limited_string.strip() + '\n'

        return limited_string.strip()
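    # Illustrative usage (a rough sketch with a hypothetical input):
    #   ArticleTextProcessing.limit_word_count_preserve_newline(
    #       "first line here\nsecond line", max_word_count=3)
    #   -> "first line here"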
    @staticmethod
    def remove_citations(s):
        """
        Removes all citations from a given string. Citations are assumed to be in the format
        of numbers enclosed in square brackets, such as [1], [2], or [1, 2], etc. This function searches
        for all occurrences of such patterns and removes them, returning the cleaned string.

        Args:
            s (str): The string from which citations are to be removed.

        Returns:
            str: The string with all citation patterns removed.
        """
        return re.sub(r'\[\d+(?:,\s*\d+)*\]', '', s)
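    # Illustrative usage (hypothetical input):
    #   ArticleTextProcessing.remove_citations("Transformers are effective [1][2, 3].")
    #   -> "Transformers are effective ."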
    @staticmethod
    def get_first_section_dict_and_list(s):
        """
        Splits an article into its top-level sections, delimited by lines starting with '# '.

        Returns:
            tuple: A dict mapping each section title to its content, and a list of the
            section titles in the order they appear.
        """
        text = s
        sections = text.strip().split('\n# ')
        titles = []
        content_dict = {}
        for section in sections:
            if section:
                lines = section.split('\n', 1)
                title = lines[0].strip()
                content = lines[1].strip() if len(lines) > 1 else ""
                titles.append(title)
                content_dict[title] = content
        return content_dict, titles
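    # Illustrative usage (note that the very first title keeps its leading '# ',
    # since only later sections are split off at the '\n# ' delimiter):
    #   ArticleTextProcessing.get_first_section_dict_and_list("# Intro\nBody\n# History\nMore")
    #   -> ({'# Intro': 'Body', 'History': 'More'}, ['# Intro', 'History'])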
    @staticmethod
    def parse_citation_indices(s):
        """
        Extracts citation indexes from the provided content string and returns them as a list of integers.

        Args:
            s (str): The content string containing citations in the format [number].

        Returns:
            List[int]: A list of citation indexes extracted from the content, in the order they appear.
        """
        matches = re.findall(r'\[\d+\]', s)
        return [int(index[1:-1]) for index in matches]
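    # Illustrative usage (duplicates are preserved in order of appearance):
    #   ArticleTextProcessing.parse_citation_indices("A claim [2], another [1], again [2].")
    #   -> [2, 1, 2]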
    @staticmethod
    def remove_uncompleted_sentences_with_citations(text):
        """
        Removes uncompleted sentences and standalone citations from the input text. Sentences are identified
        by their ending punctuation (.!?), optionally followed by a citation in square brackets (e.g., "[1]").
        Grouped citations (e.g., "[1, 2]") are split into individual ones (e.g., "[1] [2]"). Only text up to
        and including the last complete sentence and its citation is retained.

        Args:
            text (str): The input text from which uncompleted sentences and their citations are to be removed.

        Returns:
            str: The processed string with uncompleted sentences and standalone citations removed, leaving only
            complete sentences and their associated citations if present.
        """

        # Convert citations like [1, 2, 3] to [1][2][3].
        def replace_with_individual_brackets(match):
            numbers = match.group(1).split(', ')
            return ' '.join(f'[{n}]' for n in numbers)

        # Deduplicate and sort individual groups of citations.
        def deduplicate_group(match):
            citations = match.group(0)
            unique_citations = list(set(re.findall(r'\[\d+\]', citations)))
            sorted_citations = sorted(unique_citations, key=lambda x: int(x.strip('[]')))
            # Return the sorted unique citations as a string
            return ''.join(sorted_citations)

        text = re.sub(r'\[([0-9, ]+)\]', replace_with_individual_brackets, text)
        text = re.sub(r'(\[\d+\])+', deduplicate_group, text)

        # Deprecated: Remove sentence without proper ending punctuation and citations.
        # Split the text into sentences (including citations).
        # sentences_with_trailing = re.findall(r'([^.!?]*[.!?].*?)(?=[^.!?]*[.!?]|$)', text)

        # Filter sentences to ensure they end with a punctuation mark and properly formatted citations
        # complete_sentences = []
        # for sentence in sentences_with_trailing:
        #     # Check if the sentence ends with properly formatted citations
        #     if re.search(r'[.!?]( \[\d+\])*$|^[^.!?]*[.!?]$', sentence.strip()):
        #         complete_sentences.append(sentence.strip())

        # combined_sentences = ' '.join(complete_sentences)

        # Check for and append any complete citations that follow the last sentence
        # trailing_citations = re.findall(r'(\[\d+\]) ', text[text.rfind(combined_sentences) + len(combined_sentences):])
        # if trailing_citations:
        #     combined_sentences += ' '.join(trailing_citations)

        # Regex pattern to match sentence endings, including optional citation markers.
        eos_pattern = r'([.!?])\s*(\[\d+\])?\s*'
        matches = list(re.finditer(eos_pattern, text))
        if matches:
            last_match = matches[-1]
            text = text[:last_match.end()].strip()

        return text
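    # Illustrative usage (hypothetical input; grouped citations are split, adjacent
    # duplicates deduplicated, and the trailing incomplete sentence dropped):
    #   ArticleTextProcessing.remove_uncompleted_sentences_with_citations(
    #       "Claim one [1, 2]. Claim two [2][2]. An unfinished")
    #   -> "Claim one [1] [2]. Claim two [2]."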
    @staticmethod
    def clean_up_citation(conv):
        for turn in conv.dlg_history:
            # Drop trailing "References:" / "Sources:" blocks only if they are present;
            # an unconditional slice with str.find would cut the last character when
            # the marker is missing (find returns -1).
            if 'References:' in turn.agent_utterance:
                turn.agent_utterance = turn.agent_utterance[:turn.agent_utterance.find('References:')]
            if 'Sources:' in turn.agent_utterance:
                turn.agent_utterance = turn.agent_utterance[:turn.agent_utterance.find('Sources:')]
            turn.agent_utterance = turn.agent_utterance.replace('Answer:', '').strip()
            try:
                max_ref_num = max([int(x) for x in re.findall(r'\[(\d+)\]', turn.agent_utterance)])
            except Exception:
                max_ref_num = 0
            if max_ref_num > len(turn.search_results):
                for i in range(len(turn.search_results), max_ref_num + 1):
                    turn.agent_utterance = turn.agent_utterance.replace(f'[{i}]', '')
            turn.agent_utterance = ArticleTextProcessing.remove_uncompleted_sentences_with_citations(
                turn.agent_utterance)

        return conv
    @staticmethod
    def clean_up_outline(outline, topic=""):
        output_lines = []
        current_level = 0  # To track the current section level

        for line in outline.split('\n'):
            stripped_line = line.strip()

            if topic != "" and f"# {topic.lower()}" in stripped_line.lower():
                output_lines = []

            # Check if the line is a section header
            if stripped_line.startswith('#') and stripped_line != '#':
                current_level = stripped_line.count('#')
                output_lines.append(stripped_line)
            # Check if the line is a bullet point
            # elif stripped_line.startswith('-'):
            #     subsection_header = '#' * (current_level + 1) + ' ' + stripped_line[1:].strip()
            #     output_lines.append(subsection_header)
            # Preserve lines with @
            elif stripped_line.startswith('@'):
                output_lines.append(stripped_line)

        outline = '\n'.join(output_lines)

        # Remove references.
        outline = re.sub(r"#[#]? See also.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? See Also.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? Notes.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? References.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? External links.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? External Links.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? Bibliography.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? Further reading.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? Further Reading.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? Summary.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? Appendices.*?(?=##|$)", '', outline, flags=re.DOTALL)
        outline = re.sub(r"#[#]? Appendix.*?(?=##|$)", '', outline, flags=re.DOTALL)

        return outline
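    # Illustrative usage (a rough sketch: bullet lines are dropped and trailing
    # boilerplate headings such as "References" are stripped):
    #   ArticleTextProcessing.clean_up_outline("# Topic\n## Subtopic\n- bullet\n## References")
    #   -> "# Topic\n## Subtopic\n" (roughly)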
    @staticmethod
    def clean_up_section(text):
        """Clean up a section:
        1. Remove uncompleted sentences (usually due to output token limitation).
        2. Deduplicate individual groups of citations.
        3. Remove unnecessary summary."""
        paragraphs = text.split('\n')
        output_paragraphs = []
        summary_sec_flag = False
        for p in paragraphs:
            p = p.strip()
            if len(p) == 0:
                continue
            if not p.startswith('#'):
                p = ArticleTextProcessing.remove_uncompleted_sentences_with_citations(p)
            if summary_sec_flag:
                if p.startswith('#'):
                    summary_sec_flag = False
                else:
                    continue
            if p.startswith('Overall') or p.startswith('In summary') or p.startswith('In conclusion'):
                continue
            if "# Summary" in p or '# Conclusion' in p:
                summary_sec_flag = True
                continue
            output_paragraphs.append(p)

        return '\n\n'.join(output_paragraphs)  # Join with '\n\n' for markdown format.
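    # Illustrative usage (hypothetical input; the trailing summary section is dropped):
    #   ArticleTextProcessing.clean_up_section(
    #       "Some claim [1][1]. Incomplete trailing\n# Summary\nOverall, this was...")
    #   -> "Some claim [1]."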
    @staticmethod
    def update_citation_index(s, citation_map):
        """Update citation index in the string based on the citation map."""
        for original_citation in citation_map:
            s = s.replace(f"[{original_citation}]", f"__PLACEHOLDER_{original_citation}__")
        for original_citation, unify_citation in citation_map.items():
            s = s.replace(f"__PLACEHOLDER_{original_citation}__", f"[{unify_citation}]")

        return s
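    # Illustrative usage (hypothetical map from old to unified citation indices;
    # placeholders prevent a swap from clobbering already-renumbered citations):
    #   ArticleTextProcessing.update_citation_index("See [1] and [2].", {1: 2, 2: 1})
    #   -> "See [2] and [1]."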
    @staticmethod
    def parse_article_into_dict(input_string):
        """
        Parses a structured text into a nested dictionary. The structure of the text
        is defined by markdown-like headers (using '#' symbols) to denote sections
        and subsections. Each section can contain content and further nested subsections.

        The resulting dictionary captures the hierarchical structure of sections, where
        each section is represented as a key (the section's title) mapping to a value
        that is another dictionary. This dictionary contains two keys:
        - 'content': content of the section
        - 'subsections': a dictionary of nested subsections, each following the same structure.

        Args:
            input_string (str): A string containing the structured text to parse.

        Returns:
            A dictionary mapping each top-level section title to another dictionary
            containing the 'content' and 'subsections' keys as described above.
        """
        lines = input_string.split('\n')
        lines = [line for line in lines if line.strip()]
        root = {'content': '', 'subsections': {}}
        current_path = [(root, -1)]  # (current_dict, level)

        for line in lines:
            if line.startswith('#'):
                level = line.count('#')
                title = line.strip('# ').strip()
                new_section = {'content': '', 'subsections': {}}
                # Pop from the stack until the parent level is found.
                while current_path and current_path[-1][1] >= level:
                    current_path.pop()
                # Append the new section to the nearest upper level's subsections.
                current_path[-1][0]['subsections'][title] = new_section
                current_path.append((new_section, level))
            else:
                current_path[-1][0]['content'] += line + '\n'

        return root['subsections']
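    # Illustrative usage (hypothetical two-level article):
    #   ArticleTextProcessing.parse_article_into_dict("# A\ntext a\n## B\ntext b")
    #   -> {'A': {'content': 'text a\n',
    #             'subsections': {'B': {'content': 'text b\n', 'subsections': {}}}}}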


class FileIOHelper:
    @staticmethod
    def dump_json(obj, file_name, encoding="utf-8"):
        with open(file_name, 'w', encoding=encoding) as fw:
            json.dump(obj, fw, default=FileIOHelper.handle_non_serializable, ensure_ascii=False)

    @staticmethod
    def handle_non_serializable(obj):
        return "non-serializable contents"  # mark the non-serializable part

    @staticmethod
    def load_json(file_name, encoding="utf-8"):
        with open(file_name, 'r', encoding=encoding) as fr:
            return json.load(fr)

    @staticmethod
    def write_str(s, path):
        with open(path, 'w') as f:
            f.write(s)

    @staticmethod
    def load_str(path):
        with open(path, 'r') as f:
            # readlines() keeps the trailing '\n' on each line, so joining with '\n'
            # would double every newline; read the file content directly instead.
            return f.read()

    @staticmethod
    def dump_pickle(obj, path):
        with open(path, 'wb') as f:
            pickle.dump(obj, f)

    @staticmethod
    def load_pickle(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
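# Illustrative usage (hypothetical file paths):
#   FileIOHelper.dump_json({"topic": "example"}, "article_meta.json")
#   meta = FileIOHelper.load_json("article_meta.json")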


class ArticlePolishingModule():
    """
    The interface for the article polishing stage. Given the topic, the information collected
    during the knowledge curation stage, and the outline from the outline generation stage,
    this module refines the generated draft article.
    """

    def __init__(self,
                 article_gen_lm: Union[dspy.dsp.LM, dspy.dsp.HFModel],
                 article_polish_lm: Union[dspy.dsp.LM, dspy.dsp.HFModel]):
        self.article_gen_lm = article_gen_lm
        self.article_polish_lm = article_polish_lm

        self.polish_page = PolishPageModule(
            write_lead_engine=self.article_gen_lm,
            polish_engine=self.article_polish_lm
        )

    def polish_article(self,
                       topic: str,
                       draft_article,
                       remove_duplicate: bool = False):
        """
        Polish article.

        Args:
            topic (str): The topic of the article.
            draft_article (StormArticle): The draft article.
            remove_duplicate (bool): Whether to use one additional LM call to remove duplicates from the article.
        """
        article_text = draft_article.to_string()
        # Note: the flag is forced to True here, so the whole page is always polished
        # regardless of the `remove_duplicate` argument.
        remove_duplicate = True
        polish_result = self.polish_page(topic=topic, draft_page=article_text, polish_whole_page=remove_duplicate)
        polished_article = polish_result.page
        polished_article_dict = ArticleTextProcessing.parse_article_into_dict(polished_article)
        polished_article = copy.deepcopy(draft_article)
        polished_article.insert_or_create_section(article_dict=polished_article_dict)
        polished_article.post_processing()
        return polished_article
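# Illustrative usage (a rough sketch; `gen_lm`, `polish_lm`, and `draft` are hypothetical
# objects: two configured dspy language models and a StormArticle-like draft article):
#   polisher = ArticlePolishingModule(article_gen_lm=gen_lm, article_polish_lm=polish_lm)
#   polished = polisher.polish_article(topic="Example topic", draft_article=draft)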


class PolishPage(dspy.Signature):
    """
    You are a faithful text editor that is good at finding repeated information in the article and deleting them to make sure there is no repetition in the article.
    You won't delete any non-repeated part in the article.
    You will keep the inline citations and article structure (indicated by "#", "##", etc.) appropriately.
    In the article, do not include references.
    Do your job for the following article.
    """
    article = dspy.InputField(prefix="The article you need to polish:\n", format=str)
    page = dspy.OutputField(
        prefix="Your revised article:\n",
        format=str)


class PolishPageModule(dspy.Module):
    def __init__(self, write_lead_engine: Union[dspy.dsp.LM, dspy.dsp.HFModel],
                 polish_engine: Union[dspy.dsp.LM, dspy.dsp.HFModel]):
        super().__init__()
        self.write_lead_engine = write_lead_engine
        self.polish_engine = polish_engine
        self.polish_page = dspy.Predict(PolishPage)

    def forward(self, topic: str, draft_page: str, polish_whole_page: bool = True):
        with dspy.settings.context(lm=self.polish_engine):
            page = self.polish_page(article=draft_page).page
        return dspy.Prediction(page=page)