|
|
import json |
|
|
import datetime |
|
|
import pandas as pd |
|
|
from difflib import SequenceMatcher |
|
|
|
|
|
def string_similarity(str1, str2):
    """Return a similarity score in [0.0, 1.0] between two values.

    Values are coerced to strings (None counts as empty), so numeric
    fields coming straight from JSON (e.g. an int ISBN or year) compare
    safely instead of raising AttributeError on ``.lower()``.

    Rules:
      * identical values (including both empty/None) -> 1.0
      * exactly one side empty/None                  -> 0.0
      * case-insensitive exact match                 -> 1.0
      * otherwise the SequenceMatcher ratio of the lowercased strings.

    Args:
        str1: first value (str, None, or anything str()-able).
        str2: second value (same).

    Returns:
        float similarity in [0.0, 1.0].
    """
    # Normalize: None -> "", non-str (e.g. numbers from JSON) -> str.
    s1 = "" if str1 is None else str(str1)
    s2 = "" if str2 is None else str(str2)

    # Exact match (also covers the both-empty case) is a perfect score.
    if s1 == s2:
        return 1.0

    # Exactly one side empty: no similarity.
    if not s1 or not s2:
        return 0.0

    # Case-insensitive exact match before falling back to fuzzy ratio.
    s1_lower = s1.lower()
    s2_lower = s2.lower()
    if s1_lower == s2_lower:
        return 1.0

    return SequenceMatcher(None, s1_lower, s2_lower).ratio()
|
|
|
|
|
def main(file_llm='', file_bench='', key_list=None):
    """Join LLM output with benchmark records by sha256 and score field similarity.

    Args:
        file_llm: path to a JSONL file whose lines each contain an
            'llm_response_dict' object carrying a 'sha256' field.
        file_bench: path to a JSONL file of benchmark records, each with
            a 'sha256' field.
        key_list: field names to compare between the two records.

    Returns:
        (accuracy, key_accuracy, all_data) where
          accuracy     -- mean of the per-key average similarities
                          (0.0 when key_list is empty),
          key_accuracy -- {key: average similarity over matched records},
          all_data     -- {sha256: {'llm_response_dict', 'benchmark_dict',
                          'similarity'}} for sha256s present in BOTH files.

    Raises:
        ValueError: if no sha256 appears in both files (nothing to score).
    """
    # Avoid the shared-mutable-default pitfall of `key_list=[]`.
    if key_list is None:
        key_list = []

    all_data = {}

    # Load LLM responses, keyed by sha256.
    with open(file_llm, 'r', encoding='utf-8') as f:
        for line in f:
            j = json.loads(line).get('llm_response_dict')
            if not j:
                # Line has no usable LLM response; skip rather than crash
                # on `None['sha256']`.
                continue
            all_data[j['sha256']] = {'llm_response_dict': j}

    # Attach the benchmark record for each sha256.
    with open(file_bench, 'r', encoding='utf-8') as f:
        for line in f:
            j = json.loads(line)
            all_data.setdefault(j['sha256'], {})['benchmark_dict'] = j

    # Keep only records present in BOTH files.
    sha256_to_remove = [
        sha256
        for sha256, value in all_data.items()
        if 'llm_response_dict' not in value or 'benchmark_dict' not in value
    ]
    for sha256 in sha256_to_remove:
        all_data.pop(sha256)

    if not all_data:
        raise ValueError('no sha256 present in both input files; nothing to score')

    # Per-record, per-key similarity. `.get` on BOTH sides so a missing
    # field scores against None instead of raising KeyError.
    for value in all_data.values():
        value['similarity'] = {
            key: string_similarity(value['llm_response_dict'].get(key),
                                   value['benchmark_dict'].get(key))
            for key in key_list
        }

    # Average similarity per key across all matched records.
    key_accuracy = {
        key: sum(v['similarity'][key] for v in all_data.values()) / len(all_data)
        for key in key_list
    }

    # Overall accuracy is the mean of the per-key averages.
    accuracy = sum(key_accuracy.values()) / len(key_accuracy) if key_accuracy else 0.0

    return accuracy, key_accuracy, all_data
|
|
|
|
|
def write_similarity_data_to_excel(key_list, data_dict, output_file="similarity_analysis.xlsx"):
    """Write the similarity analysis to an Excel workbook.

    Each row holds one sha256 record with, for every field in *key_list*,
    the LLM value, the benchmark value and their similarity score.

    Args:
        key_list: field names to export (one llm/benchmark/similarity
            column triple per field).
        data_dict: {sha256: {'llm_response_dict', 'benchmark_dict',
            'similarity'}} as produced by main().
        output_file: path of the Excel file to create.

    Returns:
        The pandas DataFrame that was written.
    """
    # Flatten the nested dict into one row per sha256.
    rows = []
    for sha256, data in data_dict.items():
        row = {'sha256': sha256}
        for field in key_list:
            row[f'llm_{field}'] = data['llm_response_dict'].get(field)
            row[f'benchmark_{field}'] = data['benchmark_dict'].get(field)
            row[f'similarity_{field}'] = data['similarity'].get(field)
        rows.append(row)

    # Group columns as llm/benchmark/similarity triples per field.
    column_order = ['sha256']
    for field in key_list:
        column_order.extend([f'llm_{field}', f'benchmark_{field}', f'similarity_{field}'])

    # Build with explicit columns: unlike `df[column_order]`, this also
    # works when data_dict is empty (no KeyError on missing columns).
    df = pd.DataFrame(rows, columns=column_order)

    # NOTE: requires the third-party 'openpyxl' engine to be installed.
    with pd.ExcelWriter(output_file, engine='openpyxl') as writer:
        df.to_excel(writer, sheet_name='相似度分析', index=False)

    print(f"数据已成功写入 {output_file}")
    print(f"总共处理了 {len(rows)} 条记录")

    return df
|
|
|
|
|
if __name__ == '__main__':
    # Paths to the LLM-labelled output and its benchmark ground truth.
    llm_path = 'data/llm-label_textbook.jsonl'
    bench_path = 'data/benchmark_textbook.jsonl'

    # Book-metadata fields to compare between the two sources.
    fields = ['isbn', 'title', 'author', 'abstract', 'category', 'pub_time', 'publisher']

    overall, per_key, detail = main(llm_path, bench_path, fields)

    # Dump the per-record detail to a timestamped Excel workbook.
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    write_similarity_data_to_excel(fields, detail, f"similarity_analysis_{stamp}.xlsx")

    print(per_key)
    print(overall)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|