import pandas as pd
from datasets import (
    DatasetInfo,
    Features,
    GeneratorBasedBuilder,
    Image,
    Sequence,
    Split,
    SplitGenerator,
    Value,
)
_CITATION = ""
_DESCRIPTION = ""
_HOMEPAGE = "https://huggingface.co./datasets/SarcasmNet/self-annotated_reddit_climate_comment"
_LICENSE = "MIT"
_URL = "https://github.com/catherine-ywang/Reddit-Climate-Environment-Sarcasm-Self-Annotated-Data/raw/main/self_annotated_comments.csv"
class NewDataset(GeneratorBasedBuilder):
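    """Builder for the SarcasmNet self-annotated Reddit climate comment dataset.

    Reads a single CSV (one row per comment) and yields one example per post:
    the post's metadata plus the list of annotated comments.
    """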
def _info(self):
return DatasetInfo(
description=_DESCRIPTION,
features=Features({
"id": Value("string"),
"post_title": Value("string"),
"post_author": Value("string"),
"post_body": Value("string"),
"post_url": Value("string"),
"post_pic": Image(),
"subreddit": Value("string"),
"post_timestamp": Value("string"),
"post_upvotes": Value("int32"),
"post_permalink": Value("string"),
"comments": Sequence({
"CommentID": Value("string"),
"CommentAuthor": Value("string"),
"CommentBody": Value("string"),
"CommentTimestamp": Value("string"),
"CommentUpvotes": Value("int32"),
"CommentPermalink": Value("string"),
"Label": Value("int32")
})
}),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # The dataset ships as a single CSV, so everything lands in one train split.
        csv_path = dl_manager.download(_URL)
        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": csv_path})]
def _generate_examples(self, filepath):
        df = pd.read_csv(filepath)
        # read_csv encodes missing values as NaN (not pd.NA), so convert every
        # NaN cell to None so it serializes as a null.
        df = df.where(pd.notna(df), None)
        # Each CSV row is one comment; post-level columns repeat across a
        # post's rows, so group by PostID and read them from the first row.
        grouped_df = df.groupby('PostID')
        for post_id, group in grouped_df:
            post_data = group.iloc[0]
post_title = post_data['PostTitle']
post_author = post_data['PostAuthor']
post_body = post_data['PostBody']
post_url = post_data['PostUrl']
post_pic = post_data['PostPic']
subreddit = post_data['Subreddit']
post_timestamp = post_data['PostTimestamp']
post_upvotes = post_data['PostUpvotes']
post_permalink = post_data['PostPermalink']
comments = []
            # Keep the first row for each unique comment ID.
            for _, comment_data in group.drop_duplicates(subset='CommentID').iterrows():
                comment_id = comment_data['CommentID']
comment_author = comment_data['CommentAuthor']
comment_body = comment_data['CommentBody']
comment_timestamp = comment_data['CommentTimestamp']
comment_upvotes = comment_data['CommentUpvotes']
comment_permalink = comment_data['CommentPermalink']
comment_label = comment_data['Label']
                # Collect the comment's fields into a single record.
comment = {
"CommentID": comment_id,
"CommentAuthor": comment_author,
"CommentBody": comment_body,
"CommentTimestamp": comment_timestamp,
"CommentUpvotes": comment_upvotes,
"CommentPermalink": comment_permalink,
"Label": comment_label
}
comments.append(comment)
example = {
"id": post_id,
"post_title": post_title,
"post_author": post_author,
"post_body": post_body,
"post_url": post_url,
"post_pic": post_pic,
"subreddit": subreddit,
"post_timestamp": post_timestamp,
"post_upvotes": post_upvotes,
"post_permalink": post_permalink,
"comments": comments
}
yield post_id, example
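
if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the loader): assumes the
    # `datasets` library is installed and the CSV at _URL is reachable. The
    # Hub id from _HOMEPAGE would work in place of the script path.
    from datasets import load_dataset

    ds = load_dataset(__file__, split="train")
    example = ds[0]
    # A Sequence of dict features is returned as a dict of lists.
    print(example["post_title"], len(example["comments"]["CommentBody"]))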