Support data extraction with ChatGPT-4o.

The instructions are generated dynamically.
This commit is contained in:
Blade He 2024-09-05 17:22:26 -05:00
parent 7c83f9152a
commit 1caf552065
6 changed files with 663 additions and 19 deletions

6
configuration/datapoint_level.json Normal file
View File

@@ -0,0 +1,6 @@
{
"tor": "fund_level",
"ogc": "share_level",
"ter": "share_level",
"performance_fee": "share_level"
}

6
configuration/datapoint_name.json Normal file
View File

@@ -0,0 +1,6 @@
{
"tor": "TOR",
"ogc": "OGC",
"ter": "TER",
"performance_fee": "performance fees"
}

276
core/data_extraction.py Normal file
View File

@@ -0,0 +1,276 @@
import os
import json
import json_repair
import re
import fitz
import pandas as pd
from utils.gpt_utils import chat
from utils.pdf_util import PDFUtil
from utils.sql_query_util import query_document_fund_mapping
from utils.logger import logger
from utils.biz_utils import add_slash_to_text_as_regex, clean_text
class DataExtraction:
def __init__(
self,
doc_id: str,
pdf_file: str,
output_data_folder: str,
page_text_dict: dict,
datapoint_page_info: dict,
document_mapping_info_df: pd.DataFrame
) -> None:
self.doc_id = doc_id
self.pdf_file = pdf_file
if output_data_folder is None or len(output_data_folder) == 0:
output_data_folder = r"/data/emea_ar/output/extract_data/docs/"
os.makedirs(output_data_folder, exist_ok=True)
self.output_data_json_folder = os.path.join(output_data_folder, "json/")
os.makedirs(self.output_data_json_folder, exist_ok=True)
self.output_data_excel_folder = os.path.join(output_data_folder, "excel/")
os.makedirs(self.output_data_excel_folder, exist_ok=True)
if page_text_dict is None or len(page_text_dict.keys()) == 0:
self.page_text_dict = self.get_pdf_page_text_dict()
else:
self.page_text_dict = page_text_dict
if document_mapping_info_df is None or len(document_mapping_info_df) == 0:
self.document_mapping_info_df = query_document_fund_mapping(doc_id)
else:
self.document_mapping_info_df = document_mapping_info_df
self.datapoint_page_info = datapoint_page_info
self.datapoints = self.get_datapoints_from_datapoint_page_info()
self.instructions_config = self.get_instructions_config()
self.datapoint_level_config = self.get_datapoint_level()
self.datapoint_name_config = self.get_datapoint_name()
def get_instructions_config(self) -> dict:
instructions_config_file = r"./instructions/data_extraction_prompts_config.json"
with open(instructions_config_file, "r", encoding="utf-8") as f:
instructions_config = json.load(f)
return instructions_config
def get_datapoint_level(self) -> dict:
datapoint_level_file = r"./configuration/datapoint_level.json"
with open(datapoint_level_file, "r", encoding="utf-8") as f:
datapoint_level = json.load(f)
return datapoint_level
def get_datapoint_name(self) -> dict:
datapoint_name_file = r"./configuration/datapoint_name.json"
with open(datapoint_name_file, "r", encoding="utf-8") as f:
datapoint_name = json.load(f)
return datapoint_name
def get_pdf_page_text_dict(self) -> dict:
pdf_util = PDFUtil(self.pdf_file)
success, text, page_text_dict = pdf_util.extract_text()
return page_text_dict
def get_datapoints_from_datapoint_page_info(self) -> list:
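# datapoint_page_info maps each datapoint to the page numbers where it was detected; "doc_id" is metadata, not a datapoint.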
datapoints = list(self.datapoint_page_info.keys())
if "doc_id" in datapoints:
datapoints.remove("doc_id")
return datapoints
def extract_data(self) -> list:
"""
Extract data page by page with GPT. Returns a list of dicts with keys:
doc_id, page_index, datapoints, page_text, instructions, raw_answer, data
"""
data_list = []
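# For each page flagged by the page filter, build a prompt covering that page's datapoints,
# call the model once per page, and keep both the raw response and the parsed result.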
for page_num, page_text in self.page_text_dict.items():
page_datapoints = self.get_datapoints_by_page_num(page_num)
if len(page_datapoints) == 0:
continue
instructions = self.get_instructions_by_datapoints(page_text, page_datapoints)
response, with_error = chat(instructions)
if with_error:
logger.error(f"Error in extracting data from page {page_num} of document {self.doc_id}")
continue
try:
data = json.loads(response)
except Exception:
try:
data = json_repair.loads(response)
except Exception:
data = {}
data_dict = {"doc_id": self.doc_id}
data_dict["page_index"] = page_num
data_dict["datapoints"] = ", ".join(page_datapoints)
data_dict["page_text"] = page_text
data_dict["instructions"] = instructions
data_dict["raw_answer"] = response
data_dict["data"] = data
data_list.append(data_dict)
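# Persist the per-page results twice: JSON for reuse on later runs and Excel for manual review.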
json_data_file = os.path.join(self.output_data_json_folder, f"{self.doc_id}.json")
with open(json_data_file, "w", encoding="utf-8") as f:
json.dump(data_list, f, ensure_ascii=False, indent=4)
data_df = pd.DataFrame(data_list)
data_df.reset_index(drop=True, inplace=True)
excel_data_file = os.path.join(self.output_data_excel_folder, f"{self.doc_id}.xlsx")
with pd.ExcelWriter(excel_data_file) as writer:
data_df.to_excel(writer, sheet_name="extract_data", index=False)
return data_list
def get_datapoints_by_page_num(self, page_num: int) -> list:
datapoints = []
for datapoint in self.datapoints:
if page_num in self.datapoint_page_info[datapoint]:
datapoints.append(datapoint)
return datapoints
def get_instructions_by_datapoints(self, page_text: str, datapoints: list) -> str:
"""
Get instructions to extract data from the page by the datapoints
Below is the instructions sections:
summary: string
reported_name by datapoints: dict
data_business_features: dict
common: list
investment_level by datapoints: dict
data_value_range by datapoints: dict
special_rule by datapoints: dict
special_cases: dict
common: list
title
contents
special_case by datapoints: list
title
contents
output_requirement
common: list
fund_level: list
share_level: dict
fund_name: list
share_name: list
ogc_value: list
ter_value: list
performance_fee_value: list
end
"""
instructions = [f"Context:\n{page_text}\n\nInstructions:\n"]
datapoint_name_list = []
for datapoint in datapoints:
datapoint_name = self.datapoint_name_config.get(datapoint, "")
datapoint_name_list.append(datapoint_name)
summary = self.instructions_config.get("summary", "\n")
instructions.append(summary.format(', '.join(datapoint_name_list)))
instructions.append("\n")
instructions.append("Datapoints Reported name:\n")
reported_name_info = self.instructions_config.get("reported_name", {})
for datapoint in datapoints:
reported_name = reported_name_info.get(datapoint, "")
instructions.append(reported_name)
instructions.append("\n")
instructions.append("\n")
instructions.append("Data business features:\n")
data_business_features = self.instructions_config.get("data_business_features", {})
common = '\n'.join(data_business_features.get("common", []))
instructions.append(common)
instructions.append("\n")
instructions.append("Datapoints investment level:\n")
investment_level_info = data_business_features.get("investment_level", {})
for datapoint in datapoints:
investment_level = investment_level_info.get(datapoint, "")
instructions.append(investment_level)
instructions.append("\n")
instructions.append("\n")
instructions.append("Datapoints value range:\n")
data_value_range_info = data_business_features.get("data_value_range", {})
for datapoint in datapoints:
data_value_range = data_value_range_info.get(datapoint, "")
instructions.append(data_value_range)
instructions.append("\n")
instructions.append("\n")
special_rule_info = data_business_features.get("special_rule", {})
with_special_rule_title = False
for datapoint in datapoints:
special_rule_list = special_rule_info.get(datapoint, [])
if len(special_rule_list) > 0:
if not with_special_rule_title:
instructions.append("Special rule:\n")
with_special_rule_title = True
special_rule = '\n'.join(special_rule_list)
instructions.append(special_rule)
instructions.append("\n\n")
instructions.append("\n")
instructions.append("Special cases:\n")
special_cases = self.instructions_config.get("special_cases", {})
special_cases_common_list = special_cases.get("common", [])
for special_cases_common in special_cases_common_list:
title = special_cases_common.get("title", "")
instructions.append(title)
instructions.append("\n")
contents_list = special_cases_common.get("contents", [])
contents = '\n'.join(contents_list)
instructions.append(contents)
instructions.append("\n\n")
for datapoint in datapoints:
special_case_list = special_cases.get(datapoint, [])
for special_case in special_case_list:
title = special_case.get("title", "")
instructions.append(title)
instructions.append("\n")
contents_list = special_case.get("contents", [])
contents = '\n'.join(contents_list)
instructions.append(contents)
instructions.append("\n\n")
instructions.append("\n")
instructions.append("Output requirement:\n")
output_requirement = self.instructions_config.get("output_requirement", {})
output_requirement_common_list = output_requirement.get("common", [])
instructions.append("\n".join(output_requirement_common_list))
instructions.append("\n")
share_datapoint_value_example = {}
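# Fund-level datapoints get standalone output examples; share-level example values are collected
# per datapoint and combined into joint fund/share examples below.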
share_level_config = output_requirement.get("share_level", {})
for datapoint in datapoints:
investment_level = self.datapoint_level_config.get(datapoint, "")
if investment_level == "fund_level":
fund_level_example_list = output_requirement.get("fund_level", [])
for example in fund_level_example_list:
instructions.append(example)
instructions.append("\n")
instructions.append("\n")
elif investment_level == "share_level":
share_datapoint_value_example[datapoint] = share_level_config.get(f"{datapoint}_value", [])
share_datapoint_list = list(share_datapoint_value_example.keys())
if len(share_datapoint_list) > 0:
fund_name_example_list = share_level_config.get("fund_name", [])
share_name_example_list = share_level_config.get("share_name", [])
for index in range(len(fund_name_example_list)):
example_dict = {"fund name": fund_name_example_list[index],
"share name": share_name_example_list[index]}
for share_datapoint in share_datapoint_list:
share_datapoint_values = share_datapoint_value_example[share_datapoint]
if index < len(share_datapoint_values):
example_dict[share_datapoint] = share_datapoint_values[index]
instructions.append(f"Example {index + 1}:\n")
instructions.append(json.dumps(example_dict, ensure_ascii=False))
instructions.append("\n")
instructions.append("\n")
end_list = self.instructions_config.get("end", [])
instructions.append('\n'.join(end_list))
instructions.append("\n")
instructions.append("Answer:\n")
instructions_text = ''.join(instructions)
return instructions_text
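For orientation, here is a minimal usage sketch of the new class (it mirrors the call path added in main.py below); the doc_id and folder values are placeholders:

from core.data_extraction import DataExtraction
from core.page_filter import FilterPages
from utils.sql_query_util import query_document_fund_mapping

doc_id = "123456789"  # placeholder document id
pdf_file = f"/data/emea_ar/pdf/{doc_id}.pdf"
mapping_df = query_document_fund_mapping(doc_id)

# Run the page filter first to get page texts and the datapoint -> page mapping.
filter_pages = FilterPages(doc_id, pdf_file, mapping_df)
datapoint_page_info, _details = filter_pages.start_job()

extraction = DataExtraction(
    doc_id,
    pdf_file,
    "/data/emea_ar/output/extract_data/docs/",
    filter_pages.page_text_dict,
    datapoint_page_info,
    mapping_df,
)
data_list = extraction.extract_data()  # also writes <doc_id>.json and <doc_id>.xlsx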

View File

@@ -3,7 +3,8 @@ Context:
Instructions:
Read the context carefully.
Maybe there are TOR, TER, performance fees, OGC data in the context.
Maybe exists TOR, TER, performance fees, OGC data in the context.
The TOR reported name could be:
TOR, Turnover Ratio, Portfolio Turnover, Portfolio turnover ratio, PTR, etc.

141
instructions/data_extraction_prompts_config.json Normal file
View File

@@ -0,0 +1,141 @@
{
"summary": "Read the context carefully.\nMaybe exists {} data in the context.\n",
"reported_name": {
"tor": "The TOR reported name could be:\nTOR, Turnover Ratio, Portfolio Turnover, Portfolio turnover ratio, PTR, etc.",
"ogc": "The OGC reported name could be:\nOGC, OGF, Ongoing Charge, Operation Charge, Ongoing charges in per cent, Ongoing charges in percent, Ongoing charges as a percentage, On Going Charges, Operating Charge, Ongoing Fund Charge, etc.",
"ter": "The TER reported name could be:\nTER, Total Expense Ratio, Total expense ratio as a percentage, Total Fund Charge, Gross Expense Ratio, All in fee, Total Net Expense Ratio, Weighted Average Expense Ratio, Synthetic total Expense Ratio, Annualised TER including performance fees, Capped Expense Ratio, etc.",
"performance_fee": "The performance fees reported name could be:\nperformance fees, performance fees ratio, Performance, etc."
},
"data_business_features": {
"common": [
"Most of cases, the data is in the table(s) of context.",
"Fund name: a. The full fund name should be main fund name + sub-fund name, e,g, main fund name is Black Rock European, sub-fund name is Growth, the full fund name is: Black Rock European Growth.\nb. The sub-fund name may be as the first column values in the table.",
"If with multiple data values in same row, please extract the latest.",
"Only output the values which with significant reported names.\nPlease exclude below reported names and relevant values: \"Management Fees\", \"Management\", \"Management Fees p.a.\", \"Taxe d Abonnement in % p.a.\".\nDON'T EXTRACT MANAGEMENT FEES!",
"One fund could be with multiple share classes and relevant share class level data values."
],
"investment_level": {
"tor": "TOR is fund level data.",
"ogc": "OGC is share class level data",
"ter": "TER is share class level data.",
"performance_fee": "Performance fees is share class level data."
},
"data_value_range": {
"tor": "TOR is belong to percentage number, the value could be more than 100, e.g. 126.33.\nTOR could be negative number, e.g. -7.99",
"ogc": "OGC is belong to percentage number, the value should be less than 100.",
"ter": "TER is belong to percentage number, the value should be less than 100.",
"performance_fee": "Performance fees is belong to percentage number, the value should be less than 100.\nPerformance fees could be negative number, e.g. -0.56"
},
"special_rule": {
"ter": [
"If there are multiple TER value columns, here is the priority rules:",
"- With \"TER with Performance Fee\" and \"Fund TER\", pick up the values from \"TER with Performance Fee\".",
"- With \"TER including Performance Fee\" and \"TER excluding Performance Fee\", pick up the values from \"TER including Performance Fee\".",
"- With both of \"Synthetic TER\" and \"Fund TER\", if \"Synthetic TER\" with value(s), pick up the value(s) from \"Synthetic TER\", otherwise, pick up the value(s) from \"Fund TER\".",
"- With both of \"Net TER (including reimbursement)\" and \"Capped Expense Ratio\", the priority is \"Capped Expense Ratio\", please exclude the column: \"Net TER (including reimbursement)\", only pick up the values from \"Capped Expense Ratio\".",
"Please ignore TER values which with the exception of performance fees or excluded performance fees."
]
}
},
"special_cases": {
"common": [
{
"title": "Latest data with time series data:",
"contents": [
"Some data table is with multiple date columns, please extract the data from the latest date column:",
"- Get dates from column header.",
"- Only extract data from the columns which column header is as the latest date.",
"The latest date-time column usually is the first datapoint value column.",
"Here is the example:",
"performance fees\\nhistorical performance fees\\nhistorical performance fees\\nFrom \\n1 July \\nFrom \\n19 July \\nFrom \\n1 January \\nFrom \\n27 April \\nFrom \\n19 July \\nFrom \\n1 January \\n2021\\nFrom \\n22 May \\n2021\\nFrom \\n16 July \\n2021\\nFrom \\n21 September \\n2021\\nto 30 June 2023\\nto 31 December 2022\\nto 31 December 2021\\nAsia Total Return Fund Class I5 (CHF Hedged) Acc\\n1.73%\\n \\n-1.32%\\n \\n \\n 2.04%\\n \\n \\n \\n",
"The output should be:",
"[{\"fund name\": \"Asia Total Return Fund\", \"share data\": [\"share name\": \"Class I5 (CHF Hedged) Acc\", \"performance fees\": 1.73]},]",
"The keywords are performance fees, the value 1.73 is the first number with the latest date-time."
]
}
],
"ter": [
{
"title": "Combo TER value table:",
"contents": [
"Exist Feeder fund TER and Master fund TER.",
"The relevant table header is like this:",
"Feeder fund (share class)\\nMaster fund\\nTER\\nFeeder\\nTER Master\\nTotal",
"Please output separately as below:",
"- \"feeder fund share class\" and \"TER feeder\" values",
"- \"Master fund\" and \"TER Master\" values",
"Here is the example:",
"Feeder fund (share class)\\nMaster fund\\nTER\\nFeeder\\nTER Master\\nTotal\\nGlobal Portfolio Solution DKK -\\nBalanced Class TI\\nDanske Invest SICAV Global Portfolio\\nSolution Balanced Class X\\n0.1475%\\n0.7025%\\n0.850%\\n",
"The output should be:",
"[{\"fund name\": \"Global Portfolio Solution DKK\", \"share data\": [\"share name\": \"Balanced Class TI\", \"ter\": 0.1475]},{\"fund name\": \"Danske Invest SICAV Global Portfolio Solution DKK\", \"share data\": [\"share name\": \"Balanced Class X\", \"ter\": 0.7025]}]"
]
},
{
"title": "TER reported name priority:",
"contents": [
"If exists both of Expense Ratio and Synthetic total Expense Ratio, please extract the value of Synthetic total Expense Ratio."
]
}
],
"performance_fee": [
{
"title": "Performance fees is part of TER:",
"contents": [
"If exist both of \"TER including performance fees\" and \"TER excluding performance fees\",",
"The TER should be \"TER including performance fees\".",
"The performance fees should be:",
"TER including performance fees - TER excluding performance fees.",
"Here is the example:",
"GAMAX FUNDS FCP\\nClass\\nTER (excluding Performance Fees)\\nTER (including Performance Fees)\\nGAMAX FUNDS - ASIA PACIFIC\\nA\\n2.07%\\n2.07%\\n",
"The output should be:",
"[{\"fund name\": \"GAMAX FUNDS - ASIA PACIFIC\", \"share data\": [\"share name\": \"A\", \"ter\": 2.07, \"performance fees\": 0]}]",
"The performance fees value is TER (including Performance Fees) - TER (excluding Performance Fees) = 2.07 - 2.07 = 0"
]
}
]
},
"output_requirement": {
"common": [
"If possible, please extract fund name, share name, TOR, TER, performance fees, OGC values as the output.",
"The required output items are \"fund name\" and \"share name\".",
"Only output the dasta point which with relevant value.",
"fund level data: (\"fund name\" and \"TOR\") and share level data: (\"fund name\", \"share name\", \"ter\", \"performance fees\", \"ogc\") should be output separately.",
"The output should be JSON format, the format is like below example(s):"
],
"fund_level": [
"[{\"fund name\": \"fund 1\",\"tor\": 35.26},{\"fund name\": \"fund 2\",\"tor\": -28.26},{\"fund name\": \"fund 3\",\"tor\": 115.52,}]"
],
"share_level": {
"fund_name": [
"fund 1",
"fund 2",
"fund 3"
],
"share_name": [
"share 1",
"share 2",
"share 3"
],
"ogc_value": [
0.05,
1.08,
0.17
],
"ter_value": [
1.23,
2.56,
1.16
],
"performance_fee_value": [
0.2,
-0.15,
0.11
]
}
},
"end": [
"Only output JSON data.",
"Don't output the value which not exist in context, especiall for fund level datapoint: TOR.",
"If can't find share class name in context, please output empty JSON data: []"
]
}
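As a rough sketch of how this configuration is consumed (the committed assembly logic lives in DataExtraction.get_instructions_by_datapoints), the summary and reported-name sections for a subset of datapoints could be rendered like this, with display names taken from configuration/datapoint_name.json:

import json

with open("./instructions/data_extraction_prompts_config.json", "r", encoding="utf-8") as f:
    config = json.load(f)
with open("./configuration/datapoint_name.json", "r", encoding="utf-8") as f:
    names = json.load(f)

datapoints = ["ter", "ogc"]  # example subset
parts = [config["summary"].format(", ".join(names[dp] for dp in datapoints))]
parts.append("Datapoints Reported name:\n")
for dp in datapoints:
    parts.append(config["reported_name"][dp] + "\n")
print("".join(parts))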

250
main.py
View File

@@ -8,34 +8,126 @@ from utils.logger import logger
from utils.pdf_download import download_pdf_from_documents_warehouse
from utils.sql_query_util import query_document_fund_mapping
from core.page_filter import FilterPages
from core.data_extraction import DataExtraction
from core.metrics import Metrics
class EMEA_AR_Parsing:
def __init__(self, doc_id: str, pdf_folder: str = r"/data/emea_ar/pdf/") -> None:
def __init__(self,
doc_id: str,
pdf_folder: str = r"/data/emea_ar/pdf/",
output_data_folder: str = r"/data/emea_ar/output/extract_data/docs/") -> None:
self.doc_id = doc_id
self.pdf_folder = pdf_folder
os.makedirs(self.pdf_folder, exist_ok=True)
self.pdf_file = self.download_pdf()
self.document_mapping_info_df = query_document_fund_mapping(doc_id)
self.datapoint_page_info, self.result_details = self.get_datapoint_page_info()
if output_data_folder is None or len(output_data_folder) == 0:
output_data_folder = r"/data/emea_ar/output/extract_data/docs/"
self.output_data_folder = output_data_folder
os.makedirs(self.output_data_folder, exist_ok=True)
self.filter_pages = FilterPages(
self.doc_id, self.pdf_file, self.document_mapping_info_df
)
def download_pdf(self) -> str:
pdf_file = download_pdf_from_documents_warehouse(self.pdf_folder, self.doc_id)
return pdf_file
def get_datapoint_page_info(self) -> dict:
filter_pages = FilterPages(
self.doc_id, self.pdf_file, self.document_mapping_info_df
)
datapoint_page_info, result_details = filter_pages.start_job()
def get_datapoint_page_info(self) -> tuple:
datapoint_page_info, result_details = self.filter_pages.start_job()
return datapoint_page_info, result_details
def extract_data(self, re_run: bool = False) -> list:
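# Unless re_run is set, reuse the cached per-document JSON written by a previous run.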
if not re_run:
output_data_json_folder = os.path.join(self.output_data_folder, "json/")
os.makedirs(output_data_json_folder, exist_ok=True)
json_file = os.path.join(output_data_json_folder, f"{self.doc_id}.json")
if os.path.exists(json_file):
logger.info(f"The document: {self.doc_id} has been parsed, loading data from {json_file}")
with open(json_file, "r", encoding="utf-8") as f:
data_from_gpt = json.load(f)
return data_from_gpt
page_text_dict = self.filter_pages.page_text_dict
datapoint_page_info, result_details = self.get_datapoint_page_info()
data_extraction = DataExtraction(
self.doc_id,
self.pdf_file,
self.output_data_folder,
page_text_dict,
datapoint_page_info,
self.document_mapping_info_df,
)
data_from_gpt = data_extraction.extract_data()
return data_from_gpt
def filter_pages(doc_id: str, pdf_folder: str) -> None:
logger.info(f"Parsing EMEA AR for doc_id: {doc_id}")
logger.info(f"Filter EMEA AR PDF pages for doc_id: {doc_id}")
emea_ar_parsing = EMEA_AR_Parsing(doc_id, pdf_folder)
return emea_ar_parsing.datapoint_page_info, emea_ar_parsing.result_details
datapoint_page_info, result_details = emea_ar_parsing.get_datapoint_page_info()
return datapoint_page_info, result_details
def extract_data(doc_id: str,
pdf_folder: str,
output_data_folder: str,
re_run: bool = False) -> list:
logger.info(f"Extract EMEA AR data for doc_id: {doc_id}")
emea_ar_parsing = EMEA_AR_Parsing(doc_id, pdf_folder, output_data_folder)
data_from_gpt = emea_ar_parsing.extract_data(re_run)
return data_from_gpt
def batch_extract_data(
pdf_folder: str,
doc_data_excel_file: str = None,
output_child_folder: str = r"/data/emea_ar/output/extract_data/docs/",
output_total_folder: str = r"/data/emea_ar/output/extract_data/total/",
special_doc_id_list: list = None,
re_run: bool = False,
) -> None:
pdf_files = glob(pdf_folder + "*.pdf")
doc_list = []
if special_doc_id_list is not None and len(special_doc_id_list) > 0:
doc_list = special_doc_id_list
if (
len(doc_list) == 0
and doc_data_excel_file is not None
and len(doc_data_excel_file) > 0
and os.path.exists(doc_data_excel_file)
):
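# Restrict the batch to documents marked as Checked == 1 in the ground-truth workbook.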
doc_data_df = pd.read_excel(doc_data_excel_file)
doc_data_df = doc_data_df[doc_data_df["Checked"] == 1]
doc_list = [str(doc_id) for doc_id in doc_data_df["doc_id"].tolist()]
result_list = []
for pdf_file in tqdm(pdf_files):
pdf_base_name = os.path.basename(pdf_file)
doc_id = pdf_base_name.split(".")[0]
if doc_list is not None and doc_id not in doc_list:
continue
data_from_gpt = extract_data(
doc_id=doc_id,
pdf_folder=pdf_folder,
output_data_folder=output_child_folder,
re_run=re_run
)
result_list.extend(data_from_gpt)
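# Aggregate all per-document results into a single timestamped Excel workbook.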
result_df = pd.DataFrame(result_list)
result_df.reset_index(drop=True, inplace=True)
logger.info(f"Saving the result to {output_total_folder}")
os.makedirs(output_total_folder, exist_ok=True)
time_stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
output_file = os.path.join(
output_total_folder,
f"extract_data_info_{len(pdf_files)}_documents_{time_stamp}.xlsx",
)
with pd.ExcelWriter(output_file) as writer:
result_df.to_excel(writer, index=False, sheet_name="extract_data_info")
def batch_filter_pdf_files(
@@ -64,13 +156,15 @@ def batch_filter_pdf_files(
doc_id = pdf_base_name.split(".")[0]
if doc_list is not None and doc_id not in doc_list:
continue
doc_datapoint_page_info, doc_result_details = filter_pages(doc_id=doc_id, pdf_folder=pdf_folder)
doc_datapoint_page_info, doc_result_details = filter_pages(
doc_id=doc_id, pdf_folder=pdf_folder
)
result_list.append(doc_datapoint_page_info)
result_details.extend(doc_result_details)
result_df = pd.DataFrame(result_list)
result_df.reset_index(drop=True, inplace=True)
result_details_df = pd.DataFrame(result_details)
result_details_df.reset_index(drop=True, inplace=True)
@@ -83,7 +177,9 @@ def batch_filter_pdf_files(
)
with pd.ExcelWriter(output_file) as writer:
result_df.to_excel(writer, index=False, sheet_name="dp_page_info")
result_details_df.to_excel(writer, index=False, sheet_name="dp_page_info_details")
result_details_df.to_excel(
writer, index=False, sheet_name="dp_page_info_details"
)
if len(special_doc_id_list) == 0:
logger.info(f"Calculating metrics for {output_file}")
@@ -103,19 +199,123 @@ def get_metrics(
prediction_file: str,
prediction_sheet_name: str,
ground_truth_file: str,
output_folder: str = None
output_folder: str = None,
) -> None:
metrics = Metrics(
data_type=data_type,
prediction_file=prediction_file,
prediction_sheet_name=prediction_sheet_name,
ground_truth_file=ground_truth_file,
output_folder=output_folder
output_folder=output_folder,
)
missing_error_list, metrics_list, metrics_file = metrics.get_metrics()
return missing_error_list, metrics_list, metrics_file
def test_auto_generate_instructions():
"""
doc_id: str,
pdf_file: str,
page_text_dict: dict,
datapoint_page_info: dict,
document_mapping_info_df: pd.DataFrame
"""
doc_id = "402397014"
pdf_file = f"/data/emea_ar/small_pdf/{doc_id}.pdf"
document_mapping_info_df = query_document_fund_mapping(doc_id)
filter_pages = FilterPages(doc_id, pdf_file, document_mapping_info_df)
page_text_dict = filter_pages.page_text_dict
datapoint_page_info, datapoint_page_info_details = filter_pages.start_job()
datapoint_list = list(datapoint_page_info.keys())
datapoint_list.remove("doc_id")
# Pass None for output_data_folder so DataExtraction falls back to its default folder.
data_extraction = DataExtraction(
doc_id, pdf_file, None, page_text_dict, datapoint_page_info, document_mapping_info_df
)
page_index_list = list(page_text_dict.keys())
if len(page_index_list) > 0:
page_text = ""
for datapoint in datapoint_list:
if len(datapoint_page_info[datapoint]) > 0:
page_index_list = datapoint_page_info[datapoint]
page_text = page_text_dict[page_index_list[0]]
break
output_folder = (
r"/data/emea_ar/basic_information/prompts_example/generate_by_config/"
)
os.makedirs(output_folder, exist_ok=True)
tor_instructions_text = data_extraction.get_instructions_by_datapoints(
page_text, ["tor"]
)
with open(
os.path.join(output_folder, "tor_instructions.txt"), "w", encoding="utf-8"
) as f:
f.write(tor_instructions_text)
ter_instructions_text = data_extraction.get_instructions_by_datapoints(
page_text, ["ter"]
)
with open(
os.path.join(output_folder, "ter_instructions.txt"), "w", encoding="utf-8"
) as f:
f.write(ter_instructions_text)
ogc_instructions_text = data_extraction.get_instructions_by_datapoints(
page_text, ["ogc"]
)
with open(
os.path.join(output_folder, "ogc_instructions.txt"), "w", encoding="utf-8"
) as f:
f.write(ogc_instructions_text)
performance_fee_instructions_text = (
data_extraction.get_instructions_by_datapoints(
page_text, ["performance_fee"]
)
)
with open(
os.path.join(output_folder, "performance_fee_instructions.txt"),
"w",
encoding="utf-8",
) as f:
f.write(performance_fee_instructions_text)
ter_ogc_instructions_text = data_extraction.get_instructions_by_datapoints(
page_text, ["ter", "ogc"]
)
with open(
os.path.join(output_folder, "ter_ogc_instructions.txt"),
"w",
encoding="utf-8",
) as f:
f.write(ter_ogc_instructions_text)
ter_performance_fee_instructions_text = (
data_extraction.get_instructions_by_datapoints(
page_text, ["ter", "performance_fee"]
)
)
with open(
os.path.join(output_folder, "ter_performance_fee_instructions.txt"),
"w",
encoding="utf-8",
) as f:
f.write(ter_performance_fee_instructions_text)
ogc_ter_performance_fee_instructions_text = (
data_extraction.get_instructions_by_datapoints(
page_text, ["ogc", "ter", "performance_fee"]
)
)
with open(
os.path.join(output_folder, "ogc_ter_performance_fee_instructions.txt"),
"w",
encoding="utf-8",
) as f:
f.write(ogc_ter_performance_fee_instructions_text)
if __name__ == "__main__":
pdf_folder = r"/data/emea_ar/small_pdf/"
page_filter_ground_truth_file = (
@@ -124,12 +324,26 @@ if __name__ == "__main__":
prediction_output_folder = r"/data/emea_ar/output/filter_pages/"
metrics_output_folder = r"/data/emea_ar/output/metrics/"
special_doc_id_list = []
batch_filter_pdf_files(
pdf_folder, page_filter_ground_truth_file, prediction_output_folder, special_doc_id_list
)
# batch_filter_pdf_files(
# pdf_folder, page_filter_ground_truth_file, prediction_output_folder, special_doc_id_list
# )
# data_type = "page_filter"
# prediction_file = r"/data/emea_ar/output/filter_pages/datapoint_page_info_73_documents_20240903145002.xlsx"
# missing_error_list, metrics_list, metrics_file = get_metrics(
# data_type, prediction_file, page_filter_ground_truth_file, metrics_output_folder
# )
# test_auto_generate_instructions()
# doc_id = "294132333"
# extract_data(doc_id, pdf_folder)
output_child_folder = r"/data/emea_ar/output/extract_data/docs/"
output_total_folder = r"/data/emea_ar/output/extract_data/total/"
re_run = False
batch_extract_data(pdf_folder,
page_filter_ground_truth_file,
output_child_folder,
output_total_folder,
special_doc_id_list,
re_run)