# ScienceMetaBench / compare.py
import json
import datetime
import pandas as pd
from difflib import SequenceMatcher
def string_similarity(str1, str2):
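    """Return a similarity score in [0, 1] between two strings.

    None and "" are treated as empty: if exactly one side is empty the score
    is 0; identical strings (including both empty) or case-insensitive matches
    score 1; otherwise the SequenceMatcher ratio of the lowercased strings is
    returned.
    """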
    # Rule 1: if exactly one of the two strings is empty/None, similarity is 0
if (str1 is None or str1 == "") and (str2 is not None and str2 != ""):
return 0.0
if (str2 is None or str2 == "") and (str1 is not None and str1 != ""):
return 0.0
    # Rule 2: if both are identical (including both empty), similarity is 1
if (str1 or "") == (str2 or ""):
return 1.0
    # Rule 3: compare case-insensitively
s1_lower = str1.lower()
s2_lower = str2.lower()
    # If they match after lowercasing, return 1 directly
if s1_lower == s2_lower:
return 1.0
    # Otherwise use SequenceMatcher to compute a similarity ratio
matcher = SequenceMatcher(None, s1_lower, s2_lower)
similarity = matcher.ratio()
return similarity
def main(file_llm='', file_bench='', key_list=[]):
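    """Compare LLM-extracted metadata against benchmark metadata.

    Expected inputs (inferred from how the files are parsed below):
      - file_llm: JSONL where each line holds an 'llm_response_dict' object
        containing 'sha256' plus the metadata fields in key_list.
      - file_bench: JSONL where each line is a flat object with 'sha256'
        and the same metadata fields.

    Only records whose sha256 appears in both files are scored. Returns the
    overall accuracy (mean of the per-field accuracies), the per-field
    accuracy dict, and the merged per-record detail dict.
    """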
all_data = {}
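    # Load the LLM responses JSONL, keyed by sha256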
with open(file_llm, 'r', encoding='utf-8') as f:
for line in f:
j = json.loads(line).get('llm_response_dict')
# print(j)
all_data[j['sha256']] = {}
all_data[j['sha256']]['llm_response_dict'] = j
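    # Load the benchmark JSONL and attach each record to its sha256 entry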
with open(file_bench, 'r', encoding='utf-8') as f:
for line in f:
j = json.loads(line)
if j['sha256'] not in all_data:
all_data[j['sha256']] = {}
all_data[j['sha256']]['benchmark_dict'] = j
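    # Keep only records that appear in both files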
sha256_to_remove = []
for sha256, value in all_data.items():
        # Check whether both keys are present
if 'llm_response_dict' not in value or 'benchmark_dict' not in value:
sha256_to_remove.append(sha256)
for sha256 in sha256_to_remove:
all_data.pop(sha256)
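    # Compute per-field string similarity for each remaining record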
for sha256, value in all_data.items():
all_data[sha256]['similarity'] = {}
for key in key_list:
# print(key)
            all_data[sha256]['similarity'][key] = string_similarity(
                all_data[sha256]['llm_response_dict'].get(key),
                all_data[sha256]['benchmark_dict'][key],
            )
# print(all_data)
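    # Aggregate per-field similarities into a mean accuracy per key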
key_accuracy_tmp = {key: 0 for key in key_list}
for sha256, value in all_data.items():
for key in key_list:
key_accuracy_tmp[key] += value['similarity'][key]
# print(key_accuracy_tmp)
    key_accuracy = {k: v / len(all_data) for k, v in key_accuracy_tmp.items()}
    # print(key_accuracy)
    accuracy = sum(key_accuracy.values()) / len(key_accuracy)
return accuracy, key_accuracy, all_data
def write_similarity_data_to_excel(key_list, data_dict, output_file="similarity_analysis.xlsx"):
"""
将相似度分析数据写入Excel文件
Args:
data_dict: 包含相似度分析数据的字典
output_file: 输出Excel文件名
"""
    # Build the list of rows
rows = []
for sha256, data in data_dict.items():
row = {
'sha256': sha256
}
for field in key_list:
            # Field from llm_response_dict
            row[f'llm_{field}'] = data['llm_response_dict'].get(field)
            # Field from benchmark_dict
            row[f'benchmark_{field}'] = data['benchmark_dict'].get(field)
            # Similarity score for this field
            row[f'similarity_{field}'] = data['similarity'].get(field)
rows.append(row)
    # Create the DataFrame
df = pd.DataFrame(rows)
    # Define the column order (optional, makes the spreadsheet easier to read)
column_order = ['sha256']
for field in key_list:
column_order.extend([f'llm_{field}', f'benchmark_{field}', f'similarity_{field}'])
    # Reorder the columns
df = df[column_order]
    # Write to the Excel file
    with pd.ExcelWriter(output_file, engine='openpyxl') as writer:
        df.to_excel(writer, sheet_name='similarity_analysis', index=False)
        # # Get the worksheet and adjust column widths
        # worksheet = writer.sheets['similarity_analysis']
        # worksheet.column_dimensions['A'].width = 70  # sha256 column
    print(f"Data successfully written to {output_file}")
    print(f"Processed {len(rows)} records in total")
return df
if __name__ == '__main__':
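    # Score LLM-extracted textbook metadata against the benchmark annotations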
file_llm = 'data/llm-label_textbook.jsonl'
file_bench = 'data/benchmark_textbook.jsonl'
# key_list = ['doi', 'title', 'author', 'keyword', 'abstract', 'pub_time']
key_list = ['isbn', 'title', 'author', 'abstract', 'category', 'pub_time', 'publisher']
accuracy, key_accuracy, detail_data = main(file_llm, file_bench, key_list)
# print(detail_data)
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
output_filename = f"similarity_analysis_{timestamp}.xlsx"
write_similarity_data_to_excel(key_list, detail_data, output_filename)
print(key_accuracy)
print(accuracy)