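"""Split slide text into token-bounded chunks.

Reads a list of {"page_number", "content"} records (e.g. text extracted from a
slide deck), splits each page's text into chunks of roughly ``max_tokens`` NLTK
word tokens while respecting sentence boundaries, and never merges text from
different pages.
"""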
import json
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
# Uncomment on first run to download the 'punkt' tokenizer data used below:
# nltk.download('punkt')
def split_content_by_tokens(data, max_tokens=200):
    """
    Splits content into chunks of approximately max_tokens, respecting sentence boundaries.

    Args:
        data (list): List of dictionaries containing "page_number" and "content".
        max_tokens (int): Maximum number of tokens per chunk.

    Returns:
        list: A new list of dictionaries with split content.
    """
    processed_data = []
    chunk_id = 1
    prev_page = -1
    current_chunk = []
    current_token_count = 0

    for record in data:
        page_number = record.get("page_number")
        content = record.get("content", "")

        if prev_page == -1:
            prev_page = page_number
        elif prev_page != page_number:  # a new page starts: flush the buffered chunk
            if current_chunk:
                processed_data.append({
                    "id": chunk_id,
                    "page_number": prev_page,
                    "content": " ".join(current_chunk),
                    "type": "slide"
                })
                chunk_id += 1
                current_chunk = []
                current_token_count = 0
            prev_page = page_number

        # Tokenize content into sentences
        sentences = sent_tokenize(content)
        for sentence in sentences:
            sentence_tokens = word_tokenize(sentence)
            sentence_length = len(sentence_tokens)

            # Check if adding this sentence would exceed the token limit
            if current_token_count + sentence_length > max_tokens:
                # Save the current chunk
                if current_chunk:
                    processed_data.append({
                        "id": chunk_id,
                        "page_number": page_number,
                        "content": " ".join(current_chunk),
                        "type": "slide"
                    })
                    chunk_id += 1
                # Start a new chunk
                current_chunk = []
                current_token_count = 0

            # Add the current sentence to the chunk
            current_chunk.append(sentence)
            current_token_count += sentence_length

    # Save the last chunk
    if current_chunk:
        processed_data.append({
            "id": chunk_id,
            "page_number": prev_page,
            "content": " ".join(current_chunk),
            "type": "slide"
        })

    return processed_data
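
# Illustrative example (hypothetical data) of the record shapes this function expects
# and produces:
#   input:  [{"page_number": 1, "content": "First sentence. Second sentence."}]
#   output: [{"id": 1, "page_number": 1,
#             "content": "First sentence. Second sentence.", "type": "slide"}]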
if __name__ == "__main__":
    # Load your JSON file
    input_file = "/Users/yuchenhua/Coding/pdf/1121ppt.json"
    output_file = "1121_ppt.json"

    with open(input_file, 'r') as f:
        data = json.load(f)

    # Process the data
    split_data = split_content_by_tokens(data)

    # Save the processed data to a new JSON file
    with open(output_file, 'w') as f:
        json.dump(split_data, f, indent=4)

    print(f"Processed content has been saved to {output_file}")