import json

import nltk
from nltk.tokenize import sent_tokenize, word_tokenize

# sent_tokenize and word_tokenize rely on NLTK's "punkt" models;
# run nltk.download("punkt") once if they are not installed yet.

def split_content_by_tokens(data, max_tokens=200):
    """
    Splits content into chunks of approximately max_tokens, respecting sentence boundaries.

    Args:
        data (list): List of dictionaries containing "page_number" and "content".
        max_tokens (int): Maximum number of tokens per chunk.

    Returns:
        list: A new list of dictionaries with the split content.
    """
    processed_data = []
    chunk_id = 1
    prev_page = -1
    current_chunk = []
    current_token_count = 0

    for record in data:
        page_number = record.get("page_number")
        content = record.get("content", "")

        # Flush the accumulated chunk whenever the page changes, so a chunk
        # never spans two slides.
        if prev_page == -1:
            prev_page = page_number
        elif prev_page != page_number:
            if current_chunk:
                processed_data.append({
                    "id": chunk_id,
                    "page_number": prev_page,
                    "content": " ".join(current_chunk),
                    "type": "slide"
                })
                chunk_id += 1
                current_chunk = []
                current_token_count = 0
            prev_page = page_number

        sentences = sent_tokenize(content)

        for sentence in sentences:
            sentence_tokens = word_tokenize(sentence)
            sentence_length = len(sentence_tokens)

            # If adding this sentence would push the chunk over the limit,
            # close the current chunk and start a new one.
            if current_token_count + sentence_length > max_tokens:
                if current_chunk:
                    processed_data.append({
                        "id": chunk_id,
                        "page_number": page_number,
                        "content": " ".join(current_chunk),
                        "type": "slide"
                    })
                    chunk_id += 1
                current_chunk = []
                current_token_count = 0

            current_chunk.append(sentence)
            current_token_count += sentence_length

    # Flush whatever is left after the last record. Use chunk_id directly
    # (not chunk_id + 1) so the ids stay sequential with no gaps.
    if current_chunk:
        processed_data.append({
            "id": chunk_id,
            "page_number": prev_page,
            "content": " ".join(current_chunk),
            "type": "slide"
        })

    return processed_data

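
# A minimal illustration (hypothetical data, not taken from the real 1121ppt.json):
# calling split_content_by_tokens on
#     [{"page_number": 1, "content": "Short intro. Another sentence."},
#      {"page_number": 2, "content": "Text from the next slide."}]
# yields one chunk per page here (both pages are far below 200 tokens), e.g.
#     [{"id": 1, "page_number": 1, "content": "Short intro. Another sentence.", "type": "slide"},
#      {"id": 2, "page_number": 2, "content": "Text from the next slide.", "type": "slide"}]
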
if __name__ == "__main__":
    input_file = "/Users/yuchenhua/Coding/pdf/1121ppt.json"
    output_file = "1121_ppt.json"

    with open(input_file, 'r') as f:
        data = json.load(f)

    split_data = split_content_by_tokens(data)

    with open(output_file, 'w') as f:
        json.dump(split_data, f, indent=4)

    print(f"Processed content has been saved to {output_file}")