{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tokenize import tokenize\n",
"from io import BytesIO\n",
"\n",
"code = \"\"\"import nltk\n",
" from nltk.stem import PorterStemmer\n",
" porter_stemmer=PorterStemmer()\n",
" words=[\"connect\",\"connected\",\"connection\",\"connections\",\"connects\"]\n",
" stemmed_words=[porter_stemmer.stem(word) for word in words]\n",
" stemmed_words\"\"\"\n",
" \n",
"for tok in tokenize(BytesIO(code.encode('utf-8')).readline):\n",
" print(f\"Type: {tok.type}\\nString: {tok.string}\\nStart: {tok.start}\\nEnd: {tok.end}\\nLine: {tok.line.strip()}\\n======\\n\")\n"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Create a function to summarize the data.', 'For each column in the dataframe, create a correlation matrix.', '3']\n"
]
}
],
"source": [
"import re\n",
"my_summary = '\\n1. Create a function to summarize the code.\\n2. At first, we will start by importing the pandas and numpy modules.'.strip()\n",
"my_summary = 'Create a function summarize and load the dataset.\\n1. To Load the dataset\\n2. To display the basic information\\n3.'.strip()\n",
"my_summary = '\\n1. Create a function to summarize the data.\\n2. For each column in the dataframe, create a correlation matrix.\\n3'\n",
"my_symmary = \"\\n1. Create a function to summarize the code.\\n2. At first, we will start by importing the pandas and numpy modules.\"\n",
"sentences = my_summary.split('\\n')[1:]\n",
"#remove the trailing list enumeration\n",
"new_sentences = []\n",
"for sentence in sentences:\n",
" new_sentences.append(re.sub(\"[0-9]+\\.\\s\", \"\", sentence))\n",
"print(new_sentences)"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"1. Create a function to summarize the data.\n",
"2.\n",
"the sentence is valid? True\n",
"\n",
" False SPACE\n",
"1 False X\n",
". False PUNCT\n",
"Create True VERB\n",
"a True DET\n",
"function True NOUN\n",
"to True PART\n",
"summarize True VERB\n",
"the True DET\n",
"data True NOUN\n",
". False PUNCT\n",
"\n",
" False SPACE\n",
"2 False X\n",
". False PUNCT\n",
"For each column in the dataframe, create a correlation matrix.\n",
"\n",
"the sentence is valid? True\n",
"For True ADP\n",
"each True DET\n",
"column True NOUN\n",
"in True ADP\n",
"the True DET\n",
"dataframe True NOUN\n",
", False PUNCT\n",
"create True VERB\n",
"a True DET\n",
"correlation True NOUN\n",
"matrix True NOUN\n",
". False PUNCT\n",
"\n",
" False SPACE\n",
"3\n",
"the sentence is valid? False\n",
"3 False NUM\n"
]
}
],
"source": [
"import spacy\n",
"nlp = spacy.load(\"en_core_web_sm\")\n",
"\n",
"\n",
"def is_valid(words: list[str]):\n",
" has_noun = False\n",
" has_verb = False\n",
" for word in words: \n",
" if word.pos_ in ['NOUN', 'PROPN', 'PRON']:\n",
" has_noun = True\n",
" if word.pos_ == 'VERB':\n",
" has_verb = True\n",
" return has_noun and has_verb\n",
"\n",
"doc = nlp(my_summary)\n",
"sentences = list(doc.sents)\n",
"\n",
"for sentence in sentences:\n",
" print(sentence)\n",
" print(\"the sentence is valid?\", is_valid(sentence))\n",
" for word in sentence:\n",
" print(word, word.is_alpha, word.pos_)\n"
]
},
{
"cell_type": "code",
"execution_count": 50,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"['this function will build a model that can be used to train and']\n"
]
}
],
"source": [
"from transformers import T5Tokenizer, T5ForConditionalGeneration\n",
"example_text = \"This function will build a model that can be used to train and evaluate the model.\"\n",
"tokenizer = T5Tokenizer.from_pretrained('t5-small')\n",
"model = T5ForConditionalGeneration.from_pretrained('t5-small')\n",
"inputs = tokenizer.batch_encode_plus([\"summarize: \" + example_text], max_length=1024, return_tensors=\"pt\", pad_to_max_length=True) # Batch size 1\n",
"outputs = model.generate(inputs['input_ids'], num_beams=2, max_length=15, early_stopping=True)\n",
"\n",
"print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in outputs])"
]
},
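{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal variant of the previous cell, not executed here: passing truncation=True is the fix\n",
"# the warning above suggests; tokenizer, model and example_text are reused from that cell.\n",
"inputs = tokenizer([\"summarize: \" + example_text], max_length=1024, truncation=True, return_tensors=\"pt\")\n",
"outputs = model.generate(inputs['input_ids'], num_beams=2, max_length=15, early_stopping=True)\n",
"print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in outputs])"
]
},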
{
"cell_type": "code",
"execution_count": 59,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Device set to use mps:0\n"
]
},
{
"data": {
"text/plain": [
"[{'summary_text': 'An apple a day, keeps the'}]"
]
},
"execution_count": 59,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from transformers import pipeline\n",
"summarizer = pipeline(\"summarization\", model=\"facebook/bart-large-cnn\", tokenizer=\"facebook/bart-large-cnn\")\n",
"summarizer(\"An apple a day, keeps the doctor away\", min_length=5, max_length=10)"
]
},
{
"cell_type": "code",
"execution_count": 76,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package punkt to /Users/irma/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"This function will build a model that can be used to train and evaluate the model.\n",
"27\n"
]
}
],
"source": [
"from transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n",
"import nltk\n",
"nltk.download('punkt')\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(\"fabiochiu/t5-small-medium-title-generation\")\n",
"model = AutoModelForSeq2SeqLM.from_pretrained(\"fabiochiu/t5-small-medium-title-generation\")\n",
"\n",
"text = \"This function will build a model that can be used to train and evaluate the model.\"\n",
"\n",
"inputs = [\"summarize: \" + text]\n",
"\n",
"inputs = tokenizer(inputs, max_length=1024, truncation=True, return_tensors=\"pt\")\n",
"output = model.generate(**inputs, num_beams=4, do_sample=True, min_length=10, max_length=len(text) // 3)\n",
"decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]\n",
"predicted_title = nltk.sent_tokenize(decoded_output.strip())[0]\n",
"\n",
"print(predicted_title)\n",
"# Conversational AI: The Future of Customer Service\n",
"print(len(text) // 3)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}