migrate name mapping algorithm from Ravi

Blade He 2025-01-21 16:55:08 -06:00
parent d41fae3dba
commit b15d260a58
11 changed files with 2073 additions and 25 deletions

.gitignore

@@ -13,3 +13,4 @@
/specific_calc_metrics.py
/test_specific_biz_logic.py
/drilldown_practice.py
+/core/auz_nz/__pycache__/*.pyc

core/auz_nz/abbreviation_records.json
File diff suppressed because it is too large

core/auz_nz/hybrid_solution_script.py

@@ -0,0 +1,717 @@
import os
import json
import math
import ast
import re
from collections import Counter

import pandas as pd
import json_repair
import nltk
from nltk.corpus import stopwords
from openai import AzureOpenAI
# from dotenv import load_dotenv

from .string_similarity import get_cosine_similarity, get_jaccard_similarity, get_levenshtien_distance_score
from utils.gpt_utils import chat
from utils.logger import logger

# gpt_call = GPTAPI()
# Download the stopwords list if not already downloaded
nltk.download('stopwords')
# load_dotenv()
# API_KEY = os.getenv("API_KEY")
# MODEL = os.getenv("MODEL")
# END_POINT = os.getenv("END_POINT")
# API_VERSION = os.getenv("API_VERSION")
### STEP 1 - Abbreviation Replacement
ABB_JSON = dict()
def get_abb_json():
global ABB_JSON
with open("abbreviation_records.json", "r") as file:
# Load the JSON and convert keys to lowercase
ABB_JSON = {key.lower(): value for key, value in json.load(file).items()}
def get_abbre_format_str(fundname):
"""Replaces abbreviations in a fund name with their expanded forms."""
# Convert fund name to lowercase while matching
f_list = fundname.lower().split()
updated_doc_fname_words = [ABB_JSON.get(word, word).lower() for word in f_list]
return " ".join(updated_doc_fname_words)
def replace_abbrevs_in_fundnames(fund_names_list):
"""Replaces abbreviations in a list of fund names."""
return [get_abbre_format_str(fund_name) for fund_name in fund_names_list]
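# Example (assuming abbreviation_records.json contains illustrative entries such as
# {"aus": "Australian", "intl": "International"}):
#   get_abb_json()
#   get_abbre_format_str("AUS Intl Equity Fund")            -> "australian international equity fund"
#   replace_abbrevs_in_fundnames(["AUS 200", "Intl Bond"])  -> ["australian 200", "international bond"]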
### STEP 2 - Remove Stopwords
# Function to clean fund names using NLTK stopwords
def remove_stopwords_nltk(fund_names):
nltk_stopwords = set(stopwords.words('english'))
# Add custom words if necessary (e.g., fund-related stopwords)
custom_stopwords = {'inc', 'fund', 'lp', 'llc', 'plc'}
final_stopwords = nltk_stopwords.union(custom_stopwords)
def stopword_clean(fund_name, stopwords):
words = re.split(r'\W+', fund_name.lower())
filtered_words = [word for word in words if word not in stopwords and word.strip() != '']
cleaned_fund_name = ' '.join(filtered_words).title() # Return cleaned name in title case
return cleaned_fund_name
cleaned_fund_names = [stopword_clean(fund, final_stopwords) for fund in fund_names]
return cleaned_fund_names
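# For instance, remove_stopwords_nltk(["ANZ KiwiSaver Cash Fund", "The Global Bond Fund"])
# drops NLTK stopwords ("the") and the custom ones ("fund", "inc", "lp", ...) and returns
# ["Anz Kiwisaver Cash", "Global Bond"] in title case.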
### STEP 3 - Special characters removal
def remove_special_characters(fund_group):
fund_names = [re.sub(r'[^a-zA-Z0-9\s]', ' ', txt_fund).strip() for txt_fund in fund_group]
return fund_names
### STEP 4 - Common words removal
def remove_common_words(fund_list, common_words=None):
if len(fund_list)>2 or common_words:
# Step 1: Tokenize the fund names
tokenized_funds = [fund.split() for fund in fund_list]
# Step 2: Count the frequency of each word in the fund names
all_words = [word for sublist in tokenized_funds for word in sublist]
word_counts = Counter(all_words)
if not common_words:
# Step 3: Filter out words that appear in at least 70% of the fund names
threshold = 0.7 * len(fund_list)
common_words = {word for word, count in word_counts.items() if count >= threshold}
common_words = list(common_words)
# Step 4: Remove the common words from each fund name
filtered_funds = []
for fund in fund_list:
# Split the fund name into words and remove common words
filtered_fund = ' '.join([word for word in fund.split() if word not in common_words])
# If removing common words leaves the name empty, retain the original name
if filtered_fund.strip() == '':
filtered_funds.append(fund)
else:
filtered_funds.append(filtered_fund)
else:
filtered_funds = fund_list
return filtered_funds, common_words
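# Example: with more than two names, any word appearing in at least 70% of them is treated
# as a shared/provider word and stripped, e.g.
#   remove_common_words(["ANZ KiwiSaver Cash", "ANZ KiwiSaver Growth", "ANZ KiwiSaver Balanced"])
#   -> (["Cash", "Growth", "Balanced"], ["ANZ", "KiwiSaver"])   # order of the common words may vary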
### STEP 5 - LLM with Provider
prompt_instruction = """
### Task Overview:
You will be given data in the form of `provider_name` (string), `prediction_fund` (list of strings), and `true_fund` (list of strings). Your task is to match each fund from the `prediction_fund` list to the correct entry in the `true_fund` list. The final output should be a JSON where the keys are funds from `prediction_fund` and the values are the matching funds from `true_fund` or an empty string `""` if no match is found.
### Instructions:
1. Provider Name Handling:
If the same word (like the provider name) appears across multiple `true_fund` entries, it is likely part of the provider's name. In this case, ignore such common words while performing the matching.
Example:
- Input:
`provider_name`: 'Betashares'
`prediction_fund`:
[
"AUS 200",
"AUS CREDIT",
"AUS SUSTAINABLE",
"GLOBAL QUALITY",
"GLOBAL SUSTAINABLE"
]
`true_fund`:
[
"Betashares Australian Sustainability Leaders Fund",
"Betashares Australia 200 Fund",
"Betashares Global Quality Leaders Fund",
"Betashares Australian Investment Grade Corporate Bond Fund",
"Betashares Global Sustainability Leaders Fund"
]
- Output:
```json
{
"AUS 200": "Betashares Australia 200 Fund",
"AUS CREDIT": "",
"AUS SUSTAINABLE": "Betashares Australian Sustainability Leaders Fund",
"GLOBAL QUALITY": "Betashares Global Quality Leaders Fund",
"GLOBAL SUSTAINABLE": "Betashares Global Sustainability Leaders Fund"
}
```
2. Abbreviation Handling:
Some `prediction_fund` entries may use abbreviations or short forms (e.g., "AUS" for "Australia"). Identify and handle these cases by using context from both `prediction_fund` and `true_fund` lists, as shown in the example above. Match abbreviations to the expanded terms where applicable.
3. No Match Cases:
If you cannot find a suitable match for a fund from `prediction_fund` in `true_fund`, leave the match blank by assigning an empty string `""` to that entry. If you are unsure about the correct match, do not make incorrect assumptions; leave it blank.
4. Duplicate Mapping Prevention:
Ensure that each true_fund name maps to only one entry in prediction_fund to avoid duplicate mappings. If multiple prediction_fund names appear to match the same true_fund name, perform a detailed word-by-word analysis to determine the closest match based on content and context. Only map one prediction_fund name to each true_fund name, and if no strong match is found, leave it blank (""). Avoid making assumptions if clarity is lacking.
### Example Input and Output:
- Sample 1:
- Input:
`provider_name`: 'ANZ'
`prediction_fund`:
[
"Cash Fund",
"Conservative Fund",
"Conservative Balanced Fund",
"Balanced Fund"
]
`true_fund`:
[
"ANZ KiwiSaver High Growth Fund",
"ANZ KiwiSaver Conservative",
"ANZ KiwiSaver Conservative Balanced",
"ANZ KiwiSaver Balanced Growth",
"ANZ KiwiSaver Growth",
"ANZ KiwiSaver Cash"
]
- Output:
```json
{
"Cash Fund": "ANZ KiwiSaver Cash",
"Conservative Fund": "ANZ KiwiSaver Conservative",
"Conservative Balanced Fund": "ANZ KiwiSaver Conservative Balanced",
"Balanced Fund": ""
}
```
- Sample 2:
- Input:
`provider_name`: 'iShare'
`prediction_fund`:
[
"iShares Wholesale Screened International Equity Index Fund (Class E Units)",
"iShares Wholesale Australian Bond Index Fund (Class E Units)",
"iShares ESG Australian Bond Index Fund (Class E Units)",
"iShares Wholesale Australian Equity Index Fund (Class E Units)",
"iShares Wholesale Australian Listed Property Index Fund (Class E Units)",
"iShares Global Listed Property Index Fund (Hedged Class E Units)",
"iShares Wholesale International Equity Index Fund (Class E Units)",
"iShares Hedged International Equity Index Fund (Class E Units)",
"iShares ESG Global Bond Index Fund (Class E Units)",
"iShares Global Bond Index Fund (Class E Units)"
]
`true_fund`:
[
"iShares Wholesale Indexed Australian Bond Fund",
"iShares Global Bond Index Fund",
"iShares Australian Listed Property Index Fund",
"iShares Emerging Markets IMI Equity Index Fund",
"iShares International Equity Index (Hgd)",
"iShares Wholesale Australian Equity Index Fund",
"iShares Screened Wholesale International Equity Index Fund"
]
- Output:
```json
{
"iShares Wholesale Screened International Equity Index Fund (Class E Units)": "iShares Screened Wholesale International Equity Index Fund",
"iShares Wholesale Australian Bond Index Fund (Class E Units)": "iShares Wholesale Indexed Australian Bond Fund",
"iShares ESG Australian Bond Index Fund (Class E Units)": "",
"iShares Wholesale Australian Equity Index Fund (Class E Units)": "iShares Wholesale Australian Equity Index Fund",
"iShares Wholesale Australian Listed Property Index Fund (Class E Units)": "iShares Australian Listed Property Index Fund",
"iShares Global Listed Property Index Fund (Hedged Class E Units)": "",
"iShares Wholesale International Equity Index Fund (Class E Units)": "",
"iShares Hedged International Equity Index Fund (Class E Units)": "iShares International Equity Index (Hgd)",
"iShares ESG Global Bond Index Fund (Class E Units)": "",
"iShares Global Bond Index Fund (Class E Units)": "iShares Global Bond Index Fund"
}
```
- Sample 3:
- Input:
`provider_name`: 'Coolabah Capital Investments'
`prediction_fund`:
[
"Coolabah Short Term Income PIE Fund",
"Coolabah Long-Short Credit PIE Fund"
]
`true_fund`:
[
"Coolabah Long-Short Credit PIE Fund",
"Coolabah Short Term Income PIE Fund"
]
- Output:
```json
{
"Coolabah Short Term Income PIE Fund": "Coolabah Short Term Income PIE Fund",
"Coolabah Long-Short Credit PIE Fund": "Coolabah Long-Short Credit PIE Fund"
}
```
Context:
"""
system_prompt = "You are a helpful AI data analyst who identifies data and provides information correctly. Read the instructions carefully and provide the information in JSON format only."
parameters = {
"temperature": 0,
"max_tokens": 1000,
}
### Similarity methods
cosine_threshold = 0.9
levenshtien_threshold = 0.98
jaccard_thresold = 0.95
def get_cosine_score(fund_list, pred_fund_name):
matched_result = {}
matched_index = 0
for fund_db_name in fund_list:
score = get_cosine_similarity(pred_fund_name, fund_db_name)
matched_result.update({fund_db_name:score})
if len(matched_result)>0:
max_key = max(matched_result, key=matched_result.get)
matched_index = list(matched_result.keys()).index(max_key)
matched_result = {max_key: matched_result[max_key]}
return matched_result, matched_index
def get_jaccard_score(fund_list, pred_fund_name):
matched_result = {}
matched_index = 0
for fund_db_name in fund_list:
score = get_jaccard_similarity(pred_fund_name, fund_db_name)
matched_result.update({fund_db_name:score})
if len(matched_result)>0:
max_key = max(matched_result, key=matched_result.get)
matched_index = list(matched_result.keys()).index(max_key)
matched_result = {max_key: matched_result[max_key]}
return matched_result, matched_index
def get_levenshtien_score(fund_list, pred_fund_name):
matched_result = {}
matched_index = 0
for fund_db_name in fund_list:
score = get_levenshtien_distance_score(pred_fund_name, fund_db_name)
matched_result.update({fund_db_name:score})
if len(matched_result)>0:
max_key = max(matched_result, key=matched_result.get)
matched_index = list(matched_result.keys()).index(max_key)
matched_result = {max_key: matched_result[max_key]}
return matched_result, matched_index
def get_fund_match_final_score(fund_list, pred_fund_name):
cosine_score_ = ""
jaccard_score_ = ""
levenstein_score_ = ""
cosine_value_name_ = ""
jaccard_value_name_ = ""
levenstein_value_name_ = ""
# print("-> get_fund_match_final_score: ", fund_list, pred_fund_name)
# Get scores and matched indices for each similarity metric
cosine_fund_score, cosine_matched_index = get_cosine_score(fund_list, pred_fund_name)
# print("cosine_fund_score, cosine_matched_index: ", cosine_fund_score, cosine_matched_index)
jaccard_fund_score, jaccard_matched_index = get_jaccard_score(fund_list, pred_fund_name)
# print("jaccard_fund_score, jaccard_matched_index: ", jaccard_fund_score, jaccard_matched_index)
levenshtien_fund_score, levenshtein_matched_index = get_levenshtien_score(fund_list, pred_fund_name)
# print("levenshtien_fund_score, levenshtein_matched_index: ", levenshtien_fund_score, levenshtein_matched_index)
final_result = {}
matched_index = 0
# Calculate the cosine score
if cosine_fund_score:
cosine_score_ = list(cosine_fund_score.values())[0]
cosine_value_name_ = list(cosine_fund_score.keys())[0]
if cosine_score_ >= cosine_threshold:
final_result = cosine_fund_score
matched_index = cosine_matched_index
# Calculate the jaccard score
if jaccard_fund_score:
jaccard_score_ = list(jaccard_fund_score.values())[0]
jaccard_value_name_ = list(jaccard_fund_score.keys())[0]
if jaccard_score_ >= jaccard_thresold and not final_result:
final_result = jaccard_fund_score
matched_index = jaccard_matched_index
# Calculate the levenshtein score
if levenshtien_fund_score:
levenstein_score_ = list(levenshtien_fund_score.values())[0]
levenstein_value_name_ = list(levenshtien_fund_score.keys())[0]
if levenstein_score_ >= levenshtien_threshold and not final_result:
final_result = levenshtien_fund_score
matched_index = levenshtein_matched_index
# Collect all scores and best-matching names from each metric for diagnostics
all_scores_ = [cosine_score_, jaccard_score_, levenstein_score_]
all_prediction_names_ = [cosine_value_name_, jaccard_value_name_, levenstein_value_name_]
return final_result, matched_index, all_scores_, all_prediction_names_
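# The cascade above prefers cosine (>= 0.9), then Jaccard (>= 0.95), then Levenshtein (>= 0.98).
# For example, get_fund_match_final_score(["ANZ KiwiSaver Cash", "ANZ KiwiSaver Growth"], "ANZ KiwiSaver Cash")
# returns ({"ANZ KiwiSaver Cash": 1.0}, 0, <the three best scores>, <the three best-matching names>).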
### Format Response
def format_response(doc_id, pred_fund, db_fund, clean_pred_name, clean_db_name,
step0_pred_name=None, step0_db_name=None,
step0_matched_db_name_cosine = None, step0_matched_db_name_jacc = None, step0_matched_db_name_leven = None,
step0_cosine=None, step0_jaccard=None, step0_levenshtein=None,
step1_pred_name=None, step1_db_name=None,
step1_matched_db_name_cosine = None, step1_matched_db_name_jacc = None, step1_matched_db_name_leven = None,
step1_cosine=None, step1_jaccard=None, step1_levenshtein=None,
step2_pred_name=None, step2_db_name=None,
step2_matched_db_name_cosine = None, step2_matched_db_name_jacc = None, step2_matched_db_name_leven = None,
step2_cosine=None, step2_jaccard=None, step2_levenshtein=None,
step3_pred_name=None, step3_db_name=None,
step3_matched_db_name_cosine = None, step3_matched_db_name_jacc = None, step3_matched_db_name_leven = None,
step3_cosine=None, step3_jaccard=None, step3_levenshtein=None,
step4_pred_name=None, step4_db_name=None,
step4_matched_db_name_cosine = None, step4_matched_db_name_jacc = None, step4_matched_db_name_leven = None,
step4_cosine=None, step4_jaccard=None, step4_levenshtein=None,
llm_flag=None,llm_clean_pred_list=None, llm_clean_db_list=None, llm_pred_fund=None, llm_matched_db_name=None, llm_result=None):
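# Build one diagnostic row per predicted fund, recording the cleaned prediction/db names and the
# cosine/Jaccard/Levenshtein scores observed at every pre-processing step, plus any LLM fallback details.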
dt = {
'doc_id': doc_id,
'pred_fund': pred_fund,
'db_fund': db_fund,
'cleaned_pred_fund_name': clean_pred_name,
'cleaned_db_fund_name': clean_db_name,
'step0_pred_name': step0_pred_name,
'step0_db_name': step0_db_name,
'step0_matched_db_name_cosine': step0_matched_db_name_cosine,
'step0_matched_db_name_jacc': step0_matched_db_name_jacc,
'step0_matched_db_name_levenstn': step0_matched_db_name_leven,
'step0_cosine': step0_cosine,
'step0_jaccard': step0_jaccard,
'step0_levenshtein': step0_levenshtein,
'step1_pred_name': step1_pred_name,
'step1_db_name': step1_db_name,
'step1_matched_db_name_cosine': step1_matched_db_name_cosine,
'step1_matched_db_name_jacc': step1_matched_db_name_jacc,
'step1_matched_db_name_levenstn': step1_matched_db_name_leven,
'step1_cosine': step1_cosine,
'step1_jaccard': step1_jaccard,
'step1_levenshtein': step1_levenshtein,
'step2_pred_name': step2_pred_name,
'step2_db_name': step2_db_name,
'step2_matched_db_name_cosine': step2_matched_db_name_cosine,
'step2_matched_db_name_jacc': step2_matched_db_name_jacc,
'step2_matched_db_name_levenstn': step2_matched_db_name_leven,
'step2_cosine': step2_cosine,
'step2_jaccard': step2_jaccard,
'step2_levenshtein': step2_levenshtein,
'step3_pred_name': step3_pred_name,
'step3_db_name': step3_db_name,
'step3_matched_db_name_cosine': step3_matched_db_name_cosine,
'step3_matched_db_name_jacc': step3_matched_db_name_jacc,
'step3_matched_db_name_levenstn': step3_matched_db_name_leven,
'step3_cosine': step3_cosine,
'step3_jaccard': step3_jaccard,
'step3_levenshtein': step3_levenshtein,
'step4_pred_name': step4_pred_name,
'step4_db_name': step4_db_name,
'step4_matched_db_name_cosine': step4_matched_db_name_cosine,
'step4_matched_db_name_jacc': step4_matched_db_name_jacc,
'step4_matched_db_name_levenstn': step4_matched_db_name_leven,
'step4_cosine': step4_cosine,
'step4_jaccard': step4_jaccard,
'step4_levenshtein': step4_levenshtein,
'llm_flag': llm_flag,
'llm_clean_pred_list': llm_clean_pred_list,
'llm_clean_db_list': llm_clean_db_list,
'llm_pred_fund': llm_pred_fund,
'llm_matched_db_name': llm_matched_db_name,
'llm_result': llm_result
}
return dt
def final_function_to_match(doc_id, pred_list, db_list, provider_name):
final_result = {}
df_data = []
unmatched_pred_list = pred_list.copy()
unmatched_db_list = db_list.copy()
for index, pred_fund in enumerate(pred_list):
# print("\n -->> pred_fund: ",pred_fund, index)
try:
### STEP-0 RAW Test
raw_result, matched_index, all_scores_, all_matched_fund_names_ = get_fund_match_final_score(db_list, pred_fund)
# print("RAW STEP: ",raw_result)
if len(raw_result)>0:
final_result.update({pred_list[index]: db_list[matched_index]})
df_data.append(format_response(doc_id, pred_list[index], db_list[matched_index], pred_fund, list(raw_result.keys())[0],
step0_pred_name=pred_fund, step0_db_name=db_list,
step0_matched_db_name_cosine= all_matched_fund_names_[0], step0_matched_db_name_jacc= all_matched_fund_names_[1], step0_matched_db_name_leven= all_matched_fund_names_[2],
step0_cosine=all_scores_[0], step0_jaccard=all_scores_[1], step0_levenshtein=all_scores_[2],
llm_flag=False))
unmatched_db_list.remove(db_list[matched_index])
unmatched_pred_list.remove(pred_list[index])
else:
### STEP-1 Abbreviation replacement
cleaned_pred_name1 = replace_abbrevs_in_fundnames([pred_fund])[0]
cleaned_db_list1 = replace_abbrevs_in_fundnames(db_list)
# print("--> ",cleaned_db_list1, cleaned_pred_name1)
step1_result, matched_index, all_scores1_, all_matched_fund_names1_ = get_fund_match_final_score(cleaned_db_list1, cleaned_pred_name1)
# print(f"\nStep 1 - Abbreviation Replacement Result: {step1_result}")
# print(f"Cleaned Pred Name: {cleaned_pred_name1, cleaned_db_list1}")
# print(f"Matched Index: {matched_index}, All Scores: {all_scores1_}, All Matched Fund Names: {all_matched_fund_names1_}")
if len(step1_result)>0:
final_result.update({pred_list[index]: db_list[matched_index]})
df_data.append(format_response(doc_id, pred_list[index], db_list[matched_index], cleaned_pred_name1, list(step1_result.keys())[0],
step0_pred_name=pred_fund, step0_db_name=db_list,
step0_matched_db_name_cosine= all_matched_fund_names_[0], step0_matched_db_name_jacc= all_matched_fund_names_[1], step0_matched_db_name_leven= all_matched_fund_names_[2],
step0_cosine=all_scores_[0], step0_jaccard=all_scores_[1], step0_levenshtein=all_scores_[2],
step1_pred_name=cleaned_pred_name1, step1_db_name=cleaned_db_list1,
step1_matched_db_name_cosine= all_matched_fund_names1_[0], step1_matched_db_name_jacc= all_matched_fund_names1_[1], step1_matched_db_name_leven= all_matched_fund_names1_[2],
step1_cosine=all_scores1_[0], step1_jaccard=all_scores1_[1], step1_levenshtein=all_scores1_[2], llm_flag=False))
unmatched_db_list.remove(db_list[matched_index])
unmatched_pred_list.remove(pred_list[index])
else:
### STEP-2 Remove Stopwords
cleaned_pred_name2 = remove_stopwords_nltk([cleaned_pred_name1])[0]
cleaned_db_list2 = remove_stopwords_nltk(cleaned_db_list1)
# print("--> ",cleaned_db_list2, cleaned_pred_name2)
step2_result, matched_index, all_scores2_, all_matched_fund_names2_ = get_fund_match_final_score(cleaned_db_list2, cleaned_pred_name2)
# print(f"\nStep 2 - Remove Stopwords Result: {step2_result}")
# print(f"Cleaned Pred Name: {cleaned_pred_name2, cleaned_db_list2}")
# print(f"Matched Index: {matched_index}, All Scores: {all_scores2_}, All Matched Fund Names: {all_matched_fund_names2_}")
if len(step2_result)>0:
final_result.update({pred_list[index]: db_list[matched_index]})
df_data.append(format_response(doc_id, pred_list[index], db_list[matched_index], cleaned_pred_name2, list(step2_result.keys())[0],
step0_pred_name=pred_fund, step0_db_name=db_list,
step0_matched_db_name_cosine= all_matched_fund_names_[0], step0_matched_db_name_jacc= all_matched_fund_names_[1], step0_matched_db_name_leven= all_matched_fund_names_[2],
step0_cosine=all_scores_[0], step0_jaccard=all_scores_[1], step0_levenshtein=all_scores_[2],
step1_pred_name=cleaned_pred_name1, step1_db_name=cleaned_db_list1,
step1_matched_db_name_cosine= all_matched_fund_names1_[0], step1_matched_db_name_jacc= all_matched_fund_names1_[1], step1_matched_db_name_leven= all_matched_fund_names1_[2],
step1_cosine=all_scores1_[0], step1_jaccard=all_scores1_[1], step1_levenshtein=all_scores1_[2],
step2_pred_name=cleaned_pred_name2, step2_db_name=cleaned_db_list2,
step2_matched_db_name_cosine= all_matched_fund_names2_[0], step2_matched_db_name_jacc= all_matched_fund_names2_[1], step2_matched_db_name_leven= all_matched_fund_names2_[2],
step2_cosine=all_scores2_[0], step2_jaccard=all_scores2_[1], step2_levenshtein=all_scores2_[2],llm_flag=False))
unmatched_db_list.remove(db_list[matched_index])
unmatched_pred_list.remove(pred_list[index])
else:
### STEP-3 Special Character Removal
cleaned_pred_name3 = remove_special_characters([cleaned_pred_name2])[0]
cleaned_db_list3 = remove_special_characters(cleaned_db_list2)
# print("--> ",cleaned_db_list3, cleaned_pred_name3)
step3_result, matched_index, all_scores3_, all_matched_fund_names3_ = get_fund_match_final_score(cleaned_db_list3, cleaned_pred_name3)
# print(f"\nStep 3 - Special Character Removal Result: {step3_result}")
# print(f"Cleaned Pred Name: {cleaned_pred_name3, cleaned_db_list3}")
# print(f"Matched Index: {matched_index}, All Scores: {all_scores3_}, All Matched Fund Names: {all_matched_fund_names3_}")
if len(step3_result)>0:
final_result.update({pred_list[index]: db_list[matched_index]})
df_data.append(format_response(doc_id, pred_list[index], db_list[matched_index], cleaned_pred_name3, list(step3_result.keys())[0], step0_pred_name=pred_fund, step0_db_name=db_list,
step0_matched_db_name_cosine= all_matched_fund_names_[0], step0_matched_db_name_jacc= all_matched_fund_names_[1], step0_matched_db_name_leven= all_matched_fund_names_[2],
step0_cosine=all_scores_[0], step0_jaccard=all_scores_[1], step0_levenshtein=all_scores_[2],
step1_pred_name=cleaned_pred_name1, step1_db_name=cleaned_db_list1,
step1_matched_db_name_cosine= all_matched_fund_names1_[0], step1_matched_db_name_jacc= all_matched_fund_names1_[1], step1_matched_db_name_leven= all_matched_fund_names1_[2],
step1_cosine=all_scores1_[0], step1_jaccard=all_scores1_[1], step1_levenshtein=all_scores1_[2],
step2_pred_name=cleaned_pred_name2, step2_db_name=cleaned_db_list2,
step2_matched_db_name_cosine= all_matched_fund_names2_[0], step2_matched_db_name_jacc= all_matched_fund_names2_[1], step2_matched_db_name_leven= all_matched_fund_names2_[2],
step2_cosine=all_scores2_[0], step2_jaccard=all_scores2_[1], step2_levenshtein=all_scores2_[2],
step3_pred_name=cleaned_pred_name3, step3_db_name=cleaned_db_list3,
step3_matched_db_name_cosine= all_matched_fund_names3_[0], step3_matched_db_name_jacc= all_matched_fund_names3_[1], step3_matched_db_name_leven= all_matched_fund_names3_[2],
step3_cosine=all_scores3_[0], step3_jaccard=all_scores3_[1], step3_levenshtein=all_scores3_[2],llm_flag=False))
unmatched_db_list.remove(db_list[matched_index])
unmatched_pred_list.remove(pred_list[index])
else:
### STEP-4 Common Words Removal
cleaned_db_list4, _ = remove_common_words(cleaned_db_list3)
# print("cleaned_db_list4 : ",cleaned_db_list4)
cleaned_pred_list, _ = remove_common_words(pred_list)
cleaned_pred_name4 = cleaned_pred_list[index]
# print("cleaned_pred_name4: ",cleaned_pred_name4)
step4_result, matched_index, all_scores4_, all_matched_fund_names4_ = get_fund_match_final_score(cleaned_db_list4, cleaned_pred_name4)
# print(f"\nStep 4 - Common Words Removal Result: {step4_result}")
# print(f"Cleaned Pred Name: {cleaned_pred_name4, cleaned_db_list4}")
# print(f"Matched Index: {matched_index}, All Scores: {all_scores4_}, All Matched Fund Names: {all_matched_fund_names4_}")
if len(step4_result)>0:
final_result.update({pred_list[index]: db_list[matched_index]})
df_data.append(format_response(doc_id, pred_list[index], db_list[matched_index], cleaned_pred_name4,
list(step4_result.keys())[0],
step0_pred_name=pred_fund, step0_db_name=db_list,
step0_matched_db_name_cosine= all_matched_fund_names_[0], step0_matched_db_name_jacc= all_matched_fund_names_[1], step0_matched_db_name_leven= all_matched_fund_names_[2],
step0_cosine=all_scores_[0], step0_jaccard=all_scores_[1], step0_levenshtein=all_scores_[2],
step1_pred_name=cleaned_pred_name1, step1_db_name=cleaned_db_list1,
step1_matched_db_name_cosine= all_matched_fund_names1_[0], step1_matched_db_name_jacc= all_matched_fund_names1_[1], step1_matched_db_name_leven= all_matched_fund_names1_[2],
step1_cosine=all_scores1_[0], step1_jaccard=all_scores1_[1], step1_levenshtein=all_scores1_[2],
step2_pred_name=cleaned_pred_name2, step2_db_name=cleaned_db_list2,
step2_matched_db_name_cosine= all_matched_fund_names2_[0], step2_matched_db_name_jacc= all_matched_fund_names2_[1], step2_matched_db_name_leven= all_matched_fund_names2_[2],
step2_cosine=all_scores2_[0], step2_jaccard=all_scores2_[1], step2_levenshtein=all_scores2_[2],
step3_pred_name=cleaned_pred_name3, step3_db_name=cleaned_db_list3,
step3_matched_db_name_cosine= all_matched_fund_names3_[0], step3_matched_db_name_jacc= all_matched_fund_names3_[1], step3_matched_db_name_leven= all_matched_fund_names3_[2],
step3_cosine=all_scores3_[0], step3_jaccard=all_scores3_[1], step3_levenshtein=all_scores3_[2],
step4_pred_name=cleaned_pred_name4, step4_db_name=cleaned_db_list4,
step4_matched_db_name_cosine= all_matched_fund_names4_[0], step4_matched_db_name_jacc= all_matched_fund_names4_[1], step4_matched_db_name_leven= all_matched_fund_names4_[2],
step4_cosine=all_scores4_[0], step4_jaccard=all_scores4_[1], step4_levenshtein=all_scores4_[2],
llm_flag=False))
# print("unmatched_db_list: ",unmatched_db_list)
# print("unmatched_pred_list: ",unmatched_pred_list)
# print("db_list[matched_index]: ",db_list[matched_index])
# print("pred_list[index]: ",pred_list[index])
unmatched_db_list.remove(db_list[matched_index])
unmatched_pred_list.remove(pred_list[index])
else:
df_data.append(format_response(doc_id, pred_list[index], db_list[matched_index], cleaned_pred_name4,
db_list[matched_index],
step0_pred_name=pred_fund, step0_db_name=db_list,
step0_matched_db_name_cosine= all_matched_fund_names_[0], step0_matched_db_name_jacc= all_matched_fund_names_[1], step0_matched_db_name_leven= all_matched_fund_names_[2],
step0_cosine=all_scores_[0], step0_jaccard=all_scores_[1], step0_levenshtein=all_scores_[2],
step1_pred_name=cleaned_pred_name1, step1_db_name=cleaned_db_list1,
step1_matched_db_name_cosine= all_matched_fund_names1_[0], step1_matched_db_name_jacc= all_matched_fund_names1_[1], step1_matched_db_name_leven= all_matched_fund_names1_[2],
step1_cosine=all_scores1_[0], step1_jaccard=all_scores1_[1], step1_levenshtein=all_scores1_[2],
step2_pred_name=cleaned_pred_name2, step2_db_name=cleaned_db_list2,
step2_matched_db_name_cosine= all_matched_fund_names2_[0], step2_matched_db_name_jacc= all_matched_fund_names2_[1], step2_matched_db_name_leven= all_matched_fund_names2_[2],
step2_cosine=all_scores2_[0], step2_jaccard=all_scores2_[1], step2_levenshtein=all_scores2_[2],
step3_pred_name=cleaned_pred_name3, step3_db_name=cleaned_db_list3,
step3_matched_db_name_cosine= all_matched_fund_names3_[0], step3_matched_db_name_jacc= all_matched_fund_names3_[1], step3_matched_db_name_leven= all_matched_fund_names3_[2],
step3_cosine=all_scores3_[0], step3_jaccard=all_scores3_[1], step3_levenshtein=all_scores3_[2],
step4_pred_name=cleaned_pred_name4, step4_db_name=cleaned_db_list4,
step4_matched_db_name_cosine= all_matched_fund_names4_[0], step4_matched_db_name_jacc= all_matched_fund_names4_[1], step4_matched_db_name_leven= all_matched_fund_names4_[2],
step4_cosine=all_scores4_[0], step4_jaccard=all_scores4_[1], step4_levenshtein=all_scores4_[2],
llm_flag=True))
except Exception as e:
print("Error: ",e)
# print("==>>> DB LIST: ",unmatched_db_list)
# print("==>>> PRED LIST: ",unmatched_pred_list)
if len(unmatched_pred_list)!=0:
cleaned_unmatched_pred_list = replace_abbrevs_in_fundnames(unmatched_pred_list)
cleaned_unmatched_pred_list = remove_stopwords_nltk(cleaned_unmatched_pred_list)
cleaned_unmatched_pred_list = remove_special_characters(cleaned_unmatched_pred_list)
cleaned_unmatched_db_list = replace_abbrevs_in_fundnames(unmatched_db_list)
cleaned_unmatched_db_list = remove_stopwords_nltk(cleaned_unmatched_db_list)
cleaned_unmatched_db_list = remove_special_characters(cleaned_unmatched_db_list)
prompt_context = f"""
{prompt_instruction}
provider_name: {provider_name}
prediction_fund:
{cleaned_unmatched_pred_list}
true_fund:
{cleaned_unmatched_db_list}
"""
# print(f"\ncleaned_unmatched_pred_list: ",cleaned_unmatched_pred_list)
# print(f"cleaned_unmatched_db_list: ",cleaned_unmatched_db_list)
# llm_response = get_llm_response(prompt_context)
llm_response, with_error = chat(
prompt=prompt_context, system_prompt=system_prompt, response_format={"type": "json_object"}
)
# logger.info(f"fund matching LLM Response: {llm_response}")
if 'response' in llm_response.keys():
try:
llm_result = json.loads(llm_response['response'])
except:
try:
llm_result = json_repair.loads(llm_response['response'])
except:
llm_result = {}
# try:
# llm_result = ast.literal_eval(llm_response['response'].replace('\n',''))
# except Exception as e:
# logger.info(f"error: {e}")
# cleaned_response = llm_response['response'].strip("```json").strip("```").replace('\n', '')
# llm_result = json.loads(cleaned_response)
# logger.info(f"\n\n llm_result: {llm_result}")
for k,v in llm_result.items():
# print("k: ",k)
# print("v: ",v)
og_db_index=-1
og_pred_index = -1
if k in cleaned_unmatched_pred_list:
og_pred_index = cleaned_unmatched_pred_list.index(k)
if og_pred_index == -1:
# sometimes the raw name and the db name come back reversed in the LLM response
if v in cleaned_unmatched_pred_list and k in cleaned_unmatched_db_list:
og_pred_index = cleaned_unmatched_pred_list.index(v)
og_db_index = cleaned_unmatched_db_list.index(k)
# v and k are swapped
temp = v
v = k
k = temp
if og_pred_index==-1:
continue
# og_db_index = cleaned_unmatched_db_list.index(v)
if og_db_index == -1 and v in cleaned_unmatched_db_list:
og_db_index = cleaned_unmatched_db_list.index(v)
# print("og_db_index: ",og_db_index, cleaned_unmatched_db_list)
# print("unmatched_db_list: ",unmatched_db_list)
for i in df_data:
if i['pred_fund']==unmatched_pred_list[og_pred_index]:
if og_db_index!=-1:
i['db_fund']=unmatched_db_list[og_db_index]
i['cleaned_db_fund_name'] = v
final_result.update({unmatched_pred_list[og_pred_index]:unmatched_db_list[og_db_index]})
else:
i['db_fund'] = ''
i['cleaned_db_fund_name'] = ''
final_result.update({unmatched_pred_list[og_pred_index]:""})
i['llm_clean_pred_list'] = cleaned_unmatched_pred_list
i['llm_clean_db_list'] = cleaned_unmatched_db_list
i['llm_pred_fund'] = k
i['llm_matched_db_name'] = v
i['llm_result'] = llm_result
break
# break
return final_result
def api_for_fund_matching_call(doc_id, api_response, providerName, all_investment_db_names):
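# Expected shape of api_response (illustrative): {"data": [{"fund_name": "...", "result": {...}}, ...]};
# all_investment_db_names is a ';'-separated string of candidate database fund names.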
result = api_response['data']
doc_fund_names = [item['fund_name'] for item in result]
db_fund_names = all_investment_db_names.split(';')
for item in result:
item['result']['matched_db_fund_name'] = ''
item['result']['doc_fund_name'] = item['fund_name']
item['result']['fund_name_matched'] = 'False'
if len(doc_fund_names)>0 and len(db_fund_names)>0:
fund_match_result = final_function_to_match(doc_id, doc_fund_names, db_fund_names, providerName)
print("fund_match results: ", fund_match_result)
for k,v in fund_match_result.items():
if v:
for item in result:
if k==item['fund_name']:
item['fund_name'] = v
item['result']['matched_db_fund_name'] = v
item['result']['doc_fund_name'] = k
item['result']['fund_name_matched'] = 'True'
api_response['data'] = result
return api_response
# pred_list = ['Bond Fund', 'California Tax Free Income Fund', 'John Hancock Bond Fund', 'John Hancock California Tax Free Income Fund', 'John Hancock California Municipal Bond Fund', 'John Hancock Esg Core Bond Fund', 'John Hancock Government Income Fund', 'John Hancock High Yield Fund', 'John Hancock High Yield Municipal Bond Fund', 'John Hancock Income Fund', 'John Hancock Investment Grade Bond Fund', 'John Hancock Municipal Opportunities Fund', 'John Hancock Sovereign Bond Fund', 'John Hancock Short Duration Bond Fund', 'John Hancock Short Duration Municipal Opportunities Fund']
# db_list = ['JHancock Bond Fund', 'JHancock CA Municipal Bond Fund', 'JHancock ESG Core Bond Fund', 'JHancock Government Income Fund', 'JHancock High Yield Fund', 'JHancock High Yield Municipal Bond Fund', 'JHancock Income Fund', 'JHancock Investment Grade Bond Fund', 'JHancock Municipal Opportunities Fund', 'JHancock Short Dur Muncpl Opps Fd', 'JHancock Short Duration Bond Fund']
# provider_name = "John Hancock"
# doc = 123
# result = final_function_to_match(doc, pred_list, db_list, provider_name)
# print("\nresult: ",result)

core/auz_nz/readme.md

@@ -0,0 +1,9 @@
# Hybrid Solution Files Description
### The following 3 files are used to perform the Fund Matching task.
- `abbreviation_records.json`: JSON file with abbreviation expansions.
- `string_similarity.py`: Logic for matching strings using cosine, Levenshtein and Jaccard similarity.
- `hybrid_solution_script.py`: Pre-processing code and the hybrid solution model; `final_function_to_match` is the function to call to get the fund mapping.
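A minimal usage sketch (names are illustrative; it mirrors the commented example at the bottom of `hybrid_solution_script.py`):

```python
from core.auz_nz.hybrid_solution_script import final_function_to_match

pred_list = ["Cash Fund", "Conservative Fund"]                   # fund names extracted from the document
db_list = ["ANZ KiwiSaver Cash", "ANZ KiwiSaver Conservative"]   # candidate names from the database
result = final_function_to_match(123, pred_list, db_list, provider_name="ANZ")
# result maps each predicted name to its matched database name, or "" when nothing matches
```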

core/auz_nz/string_similarity.py

@@ -0,0 +1,77 @@
import math
import re
from collections import Counter
from fuzzywuzzy import fuzz
WORD = re.compile(r"\w+")
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
def get_cosine_similarity(str1: str, str2: str):
"""
Calculate the cosine similarity between two strings.
"""
try:
vec1 = text_to_vector(str1.lower())
vec2 = text_to_vector(str2.lower())
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x] ** 2 for x in list(vec1.keys())])
sum2 = sum([vec2[x] ** 2 for x in list(vec2.keys())])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
except Exception as e:
# print("error: ",e)
pass
return 0.0
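# e.g. get_cosine_similarity("Global Bond Fund", "Global Bond Index Fund")
#      = 3 shared word counts / (sqrt(3) * sqrt(4)) ~= 0.866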
def get_ngrams(text, n):
# Remove spaces and build the set of character n-grams
text = text.replace(" ", "")
return {text[i : i + n] for i in range(len(text) - n + 1)}
def get_jaccard_similarity(str1: str, str2: str) -> float:
"""
Calculate the jaccard similarity between two strings.
"""
try:
# Generate bigrams for each string
str1 = str1.lower()
str2 = str2.lower()
set1 = set(get_ngrams(str1, 2)) # Bigrams for str1
set2 = set(get_ngrams(str2, 2)) # Bigrams for str2
# Calculate intersection and union
intersection = len(set1.intersection(set2))
union = len(set1.union(set2))
# Compute Jaccard similarity
return intersection / union if union != 0 else 0.0
except Exception as e:
# print("error: ",e)
pass
return 0.0
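# e.g. get_jaccard_similarity("cash fund", "cash funds"): the character bigrams of "cashfund"
#      and "cashfunds" share 7 of 8 distinct bigrams -> 7 / 8 = 0.875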
def get_levenshtien_distance_score(str1: str, str2: str) -> float:
"""
Calculate the levenshtein distance score between two strings.
"""
try:
str1 = str1.lower()
str2 = str2.lower()
similarity_score = fuzz.ratio(str1, str2)
return similarity_score / 100
except Exception as e:
# print("error: ",e)
pass
return 0.0
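# e.g. get_levenshtien_distance_score("cash", "cast") -> fuzz.ratio("cash", "cast") / 100 = 0.75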

core/data_extraction.py

@@ -495,9 +495,10 @@ class DataExtraction:
exclude_data,
extract_way="text"
)
-response, with_error = chat(
-instructions, response_format={"type": "json_object"}
+result, with_error = chat(
+prompt=instructions, response_format={"type": "json_object"}
)
+response = result.get("response", "")
if with_error:
logger.error(f"Error in extracting tables from page")
data_dict = {"doc_id": self.doc_id}
@@ -508,6 +509,9 @@
data_dict["raw_answer"] = response
data_dict["extract_data"] = {"data": []}
data_dict["extract_way"] = original_way
+data_dict["prompt_token"] = result.get("prompt_token", 0)
+data_dict["completion_token"] = result.get("completion_token", 0)
+data_dict["total_token"] = result.get("total_token", 0)
return data_dict
try:
data = json.loads(response)
@@ -539,6 +543,9 @@
data_dict["raw_answer"] = response
data_dict["extract_data"] = data
data_dict["extract_way"] = original_way
+data_dict["prompt_token"] = result.get("prompt_token", 0)
+data_dict["completion_token"] = result.get("completion_token", 0)
+data_dict["total_token"] = result.get("total_token", 0)
return data_dict
def extract_data_by_page_image(
@@ -566,6 +573,9 @@
data_dict["raw_answer"] = ""
data_dict["extract_data"] = {"data": []}
data_dict["extract_way"] = "image"
+data_dict["prompt_token"] = 0
+data_dict["completion_token"] = 0
+data_dict["total_token"] = 0
return data_dict
else:
if previous_page_last_fund is not None and len(previous_page_last_fund) > 0:
@@ -610,9 +620,10 @@
exclude_data=exclude_data,
extract_way="image"
)
-response, with_error = chat(
-instructions, response_format={"type": "json_object"}, image_base64=image_base64
+result, with_error = chat(
+prompt=instructions, response_format={"type": "json_object"}, image_base64=image_base64
)
+response = result.get("response", "")
if with_error:
logger.error(f"Error in extracting tables from page")
data_dict = {"doc_id": self.doc_id}
@@ -623,6 +634,9 @@
data_dict["raw_answer"] = response
data_dict["extract_data"] = {"data": []}
data_dict["extract_way"] = "image"
+data_dict["prompt_token"] = result.get("prompt_token", 0)
+data_dict["completion_token"] = result.get("completion_token", 0)
+data_dict["total_token"] = result.get("total_token", 0)
return data_dict
try:
data = json.loads(response)
@@ -644,15 +658,19 @@
data_dict["raw_answer"] = response
data_dict["extract_data"] = data
data_dict["extract_way"] = "image"
+data_dict["prompt_token"] = result.get("prompt_token", 0)
+data_dict["completion_token"] = result.get("completion_token", 0)
+data_dict["total_token"] = result.get("total_token", 0)
return data_dict
def get_image_text(self, page_num: int) -> str:
image_base64 = self.get_pdf_image_base64(page_num)
instructions = self.instructions_config.get("get_image_text", "\n")
logger.info(f"Get text from image of page {page_num}")
-response, with_error = chat(
-instructions, response_format={"type": "json_object"}, image_base64=image_base64
+result, with_error = chat(
+prompt=instructions, response_format={"type": "json_object"}, image_base64=image_base64
)
+response = result.get("response", "")
text = ""
if with_error:
logger.error(f"Can't get text from current image")

core/data_mapping.py

@@ -7,6 +7,7 @@ from utils.sql_query_util import (
query_investment_by_provider,
)
from utils.logger import logger
+from core.auz_nz.hybrid_solution_script import final_function_to_match
class DataMapping:
@@ -51,6 +52,7 @@ class DataMapping:
self.doc_share_name_list = []
self.doc_fund_mapping = pd.DataFrame()
self.doc_fund_class_mapping = pd.DataFrame()
+self.provider_name = ""
else:
self.doc_fund_name_list = (
self.document_mapping_info_df["FundName"].unique().tolist()
@@ -64,6 +66,7 @@
self.doc_fund_class_mapping = self.document_mapping_info_df[
["FundId", "SecId", "ShareClassName", "CurrencyId"]
].drop_duplicates()
+self.provider_name = self.document_mapping_info_df["ProviderName"].values[0]
logger.info("Setting provider mapping data")
self.provider_mapping_df = self.get_provider_mapping()
@@ -100,6 +103,128 @@
provider_mapping_df.reset_index(drop=True, inplace=True)
return provider_mapping_df
def mapping_raw_data_entrance(self):
if self.doc_source == "emear_ar":
return self.mapping_raw_data()
elif self.doc_source == "aus_prospectus":
return self.mapping_raw_data_aus()
else:
return self.mapping_raw_data()
def mapping_raw_data_aus(self):
logger.info(f"Mapping raw data for AUS Prospectus document {self.doc_id}")
mapped_data_list = []
# Generate raw name based on fund name and share name by integrate_share_name
fund_raw_name_list = []
share_raw_name_list = []
for page_data in self.raw_document_data_list:
doc_id = page_data.get("doc_id", "")
page_index = page_data.get("page_index", "")
raw_data_list = page_data.get("extract_data", {}).get("data", [])
for raw_data in raw_data_list:
raw_fund_name = raw_data.get("fund_name", "")
if raw_fund_name is None or len(raw_fund_name) == 0:
continue
raw_share_name = raw_data.get("share_name", "")
raw_data_keys = list(raw_data.keys())
if len(raw_share_name) > 0:
integrated_share_name = self.integrate_share_name(raw_fund_name, raw_share_name)
if integrated_share_name not in share_raw_name_list:
share_raw_name_list.append(integrated_share_name)
for datapoint in self.datapoints:
if datapoint in raw_data_keys:
mapped_data = {
"doc_id": doc_id,
"page_index": page_index,
"raw_fund_name": raw_fund_name,
"raw_share_name": raw_share_name,
"raw_name": integrated_share_name,
"datapoint": datapoint,
"value": raw_data[datapoint],
"investment_type": 1,
"investment_id": "",
"investment_name": "",
"similarity": 0
}
mapped_data_list.append(mapped_data)
else:
if raw_fund_name not in fund_raw_name_list:
fund_raw_name_list.append(raw_fund_name)
for datapoint in self.datapoints:
if datapoint in raw_data_keys:
mapped_data = {
"doc_id": doc_id,
"page_index": page_index,
"raw_fund_name": raw_fund_name,
"raw_share_name": "",
"raw_name": raw_fund_name,
"datapoint": datapoint,
"value": raw_data[datapoint],
"investment_type": 33,
"investment_id": "",
"investment_name": ""
}
mapped_data_list.append(mapped_data)
# Mapping raw data with database
iter_count = 30
fund_match_result = {}
if len(fund_raw_name_list) > 0:
fund_match_result = self.get_raw_name_db_match_result(fund_raw_name_list, "fund", iter_count)
logger.info(f"Fund match result: \n{fund_match_result}")
share_match_result = {}
if len(share_raw_name_list) > 0:
share_match_result = self.get_raw_name_db_match_result(share_raw_name_list, "share", iter_count)
logger.info(f"Share match result: \n{share_match_result}")
for mapped_data in mapped_data_list:
investment_type = mapped_data["investment_type"]
raw_name = mapped_data["raw_name"]
if investment_type == 33:
if fund_match_result.get(raw_name) is not None:
matched_db_fund_name = fund_match_result[raw_name]
if matched_db_fund_name is not None and len(matched_db_fund_name) > 0:
# get FundId from self.doc_fund_mapping
find_fund_df = self.doc_fund_mapping[self.doc_fund_mapping["FundName"] == matched_db_fund_name]
if find_fund_df is not None and len(find_fund_df) > 0:
fund_id = find_fund_df["FundId"].values[0]
mapped_data["investment_id"] = fund_id
mapped_data["investment_name"] = matched_db_fund_name
mapped_data["similarity"] = 1
if investment_type == 1:
if share_match_result.get(raw_name) is not None:
matched_db_share_name = share_match_result[raw_name]
if matched_db_share_name is not None and len(matched_db_share_name) > 0:
# get SecId from self.doc_fund_class_mapping
find_share_df = self.doc_fund_class_mapping[self.doc_fund_class_mapping["ShareClassName"] == matched_db_share_name]
if find_share_df is not None and len(find_share_df) > 0:
share_id = find_share_df["SecId"].values[0]
mapped_data["investment_id"] = share_id
mapped_data["investment_name"] = matched_db_share_name
mapped_data["similarity"] = 1
self.output_mapping_file(mapped_data_list)
return mapped_data_list
def get_raw_name_db_match_result(self, raw_name_list, investment_type: str, iter_count: int = 30):
# split raw_name_list into several parts, each with at most iter_count (default 30) elements
# The reason to split is to avoid token-limit issues when invoking ChatGPT
raw_name_list_parts = [raw_name_list[i:i + iter_count]
for i in range(0, len(raw_name_list), iter_count)]
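# e.g. 65 raw names with iter_count=30 -> chunks of 30, 30 and 5 names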
all_match_result = {}
for raw_name_list in raw_name_list_parts:
if investment_type == "fund":
match_result = final_function_to_match(doc_id=self.doc_id,
pred_list=raw_name_list,
db_list=self.doc_fund_name_list,
provider_name=self.provider_name)
else:
match_result = final_function_to_match(doc_id=self.doc_id,
pred_list=raw_name_list,
db_list=self.doc_share_name_list,
provider_name=self.provider_name)
all_match_result.update(match_result)
return all_match_result
def mapping_raw_data(self):
"""
doc_id, page_index, datapoint, value,
@@ -218,6 +343,10 @@
}
mapped_data_list.append(mapped_data)
+self.output_mapping_file(mapped_data_list)
+return mapped_data_list
+def output_mapping_file(self, mapped_data_list: list):
json_data_file = os.path.join(
self.output_data_json_folder, f"{self.doc_id}.json"
)
@@ -240,8 +369,6 @@
except Exception as e:
logger.error(f"Failed to save excel file: {e}")
-return mapped_data_list
def integrate_share_name(self, raw_fund_name: str, raw_share_name: str):
raw_name = ""
if raw_share_name is not None and len(raw_share_name) > 0:


@@ -51,9 +51,10 @@ class Translate_PDF:
instructions = f"Context: \n{text}\n\nInstructions: Translate the contex in {self.target_language}. \n"
instructions += "Please output the translated text in the following JSON format: {\"translated_text\": \"translated text\"} \n\n"
instructions += "Answer: \n"
-response, with_error = chat(
-instructions, response_format={"type": "json_object"}
+result, with_error = chat(
+prompt=instructions, response_format={"type": "json_object"}
)
+response = result.get("response", "")
try:
data = json.loads(response)
except:

main.py

@@ -16,6 +16,7 @@ from utils.biz_utils import add_slash_to_text_as_regex
from core.page_filter import FilterPages
from core.data_extraction import DataExtraction
from core.data_mapping import DataMapping
+from core.auz_nz.hybrid_solution_script import api_for_fund_matching_call
from core.metrics import Metrics
@@ -277,8 +278,9 @@ class EMEA_AR_Parsing:
data_from_gpt,
self.document_mapping_info_df,
self.output_mapping_data_folder,
+self.doc_source
)
-return data_mapping.mapping_raw_data()
+return data_mapping.mapping_raw_data_entrance()
def filter_pages(doc_id: str, pdf_folder: str, doc_source: str) -> None:
@@ -402,6 +404,7 @@ def batch_start_job(
pdf_folder: str = "/data/emea_ar/pdf/",
output_pdf_text_folder: str = r"/data/emea_ar/output/pdf_text/",
doc_data_excel_file: str = None,
+document_mapping_file: str = None,
output_extract_data_child_folder: str = r"/data/emea_ar/output/extract_data/docs/",
output_mapping_child_folder: str = r"/data/emea_ar/output/mapping_data/docs/",
output_extract_data_total_folder: str = r"/data/emea_ar/output/extract_data/total/",
@@ -499,6 +502,16 @@
writer, index=False, sheet_name="extract_data"
)
+if document_mapping_file is not None and len(document_mapping_file) > 0 and os.path.exists(document_mapping_file):
+try:
+merged_total_data_folder = os.path.join(output_mapping_total_folder, "merged/")
+os.makedirs(merged_total_data_folder, exist_ok=True)
+data_file_base_name = os.path.basename(output_file)
+output_merged_data_file_path = os.path.join(merged_total_data_folder, "merged_" + data_file_base_name)
+merge_output_data_aus_prospectus(output_file, document_mapping_file, output_merged_data_file_path)
+except Exception as e:
+logger.error(f"Error: {e}")
if calculate_metrics:
prediction_sheet_name = "total_mapping_data"
ground_truth_file = r"/data/emea_ar/ground_truth/data_extraction/mapping_data_info_73_documents.xlsx"
@@ -989,6 +1002,7 @@ def batch_run_documents(
doc_source: str = "emea_ar",
special_doc_id_list: list = None,
pdf_folder: str = r"/data/emea_ar/pdf/",
+document_mapping_file: str = None,
output_pdf_text_folder: str = r"/data/emea_ar/output/pdf_text/",
output_extract_data_child_folder: str = r"/data/emea_ar/output/extract_data/docs/",
output_extract_data_total_folder: str = r"/data/emea_ar/output/extract_data/total/",
@@ -1001,8 +1015,8 @@
page_filter_ground_truth_file = (
r"/data/emea_ar/ground_truth/page_filter/datapoint_page_info_88_documents.xlsx"
)
-re_run_extract_data = True
-re_run_mapping_data = True
+re_run_extract_data = False
+re_run_mapping_data = False
force_save_total_data = True
calculate_metrics = False
@@ -1027,6 +1041,7 @@
pdf_folder,
output_pdf_text_folder,
page_filter_ground_truth_file,
+document_mapping_file,
output_extract_data_child_folder,
output_mapping_child_folder,
output_extract_data_total_folder,
@@ -1046,6 +1061,7 @@
pdf_folder,
output_pdf_text_folder,
page_filter_ground_truth_file,
+document_mapping_file,
output_extract_data_child_folder,
output_mapping_child_folder,
output_extract_data_total_folder,
@@ -1178,7 +1194,7 @@ def merge_output_data_aus_prospectus(
):
# TODO: merge output data for aus prospectus, plan to realize it on 2025-01-16
data_df = pd.read_excel(data_file_path, sheet_name="total_mapping_data")
-document_mapping_df = pd.read_excel(document_mapping_file, sheet_name="Sheet1")
+document_mapping_df = pd.read_excel(document_mapping_file, sheet_name="document_mapping")
# set doc_id to be string type
data_df["doc_id"] = data_df["doc_id"].astype(str)
document_mapping_df["DocumentId"] = document_mapping_df["DocumentId"].astype(str)
@@ -1337,6 +1353,7 @@ if __name__ == "__main__":
document_sample_file = r"./sample_documents/aus_prospectus_100_documents_multi_fund_sample.txt"
with open(document_sample_file, "r", encoding="utf-8") as f:
special_doc_id_list = [doc_id.strip() for doc_id in f.readlines()]
+document_mapping_file = r"/data/aus_prospectus/basic_information/from_2024_documents/aus_100_document_prospectus_multi_fund.xlsx"
# special_doc_id_list: list = [
# "539790009",
# "542300403",
@@ -1350,7 +1367,7 @@
# "555377021",
# "555654388",
# ]
-# special_doc_id_list: list = ["554851189"]
+# special_doc_id_list: list = ["534287518"]
pdf_folder: str = r"/data/aus_prospectus/pdf/"
output_pdf_text_folder: str = r"/data/aus_prospectus/output/pdf_text/"
output_extract_data_child_folder: str = (
@@ -1366,10 +1383,12 @@
r"/data/aus_prospectus/output/mapping_data/total/"
)
drilldown_folder = r"/data/aus_prospectus/output/drilldown/"
batch_run_documents(
doc_source=doc_source,
special_doc_id_list=special_doc_id_list,
pdf_folder=pdf_folder,
+document_mapping_file=document_mapping_file,
output_pdf_text_folder=output_pdf_text_folder,
output_extract_data_child_folder=output_extract_data_child_folder,
output_extract_data_total_folder=output_extract_data_total_folder,

requirements.txt

@@ -12,3 +12,5 @@ openpyxl==3.1.2
XlsxWriter==3.1.2
tiktoken==0.7.0
beautifulsoup4==4.12.3
+fuzzywuzzy==0.18.0
+nltk==3.9.1

utils/gpt_utils.py

@@ -10,10 +10,6 @@ import dotenv
# loads .env file with your OPENAI_API_KEY
dotenv.load_dotenv()
-# tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
-tokenizer = tiktoken.get_encoding("cl100k_base")
def get_embedding(text, engine=os.getenv("EMBEDDING_ENGINE")):
count = 0
error = ""
@@ -32,6 +28,7 @@ def get_embedding(text, engine=os.getenv("EMBEDDING_ENGINE")):
def num_tokens_from_string(string: str) -> int:
+tokenizer = tiktoken.get_encoding("cl100k_base")
"""Returns the number of tokens in a text string."""
num_tokens = len(tokenizer.encode(string))
return num_tokens
@@ -64,6 +61,7 @@ def num_tokens_from_messages(messages, model="gpt-35-turbo-16k"):
def chat(
prompt: str,
+system_prompt: str = None,
engine=os.getenv("Engine_GPT4o"),
azure_endpoint=os.getenv("OPENAI_API_BASE_GPT4o"),
api_key=os.getenv("OPENAI_API_KEY_GPT4o"),
@@ -104,11 +102,14 @@ def chat(
]
else:
messages = [{"role": "user", "content": prompt}]
+if system_prompt is not None and len(system_prompt) > 0:
+messages.insert(0, {"role": "system", "content": system_prompt})
count = 0
-error = ""
+result = {}
request_timeout = 600
while count < 8:
+response = None
try:
if count > 0:
print(f"retrying the {count} time...")
@@ -139,15 +140,25 @@
response_format=response_format,
)
sleep(1)
-return response.choices[0].message.content, False
+result["full_response"] = response
+result["response"] = response.choices[0].message.content
+result["prompt_token"] = response.usage.prompt_tokens
+result["completion_token"] = response.usage.completion_tokens
+result["total_token"] = response.usage.total_tokens
+return result, False
except Exception as e:
error = str(e)
print(f"error message: {error}")
if "maximum context length" in error:
-return error, True
+result["full_response"] = response
+result["response"] = error
+result["prompt_token"] = response.usage.prompt_tokens
+result["completion_token"] = response.usage.completion_tokens
+result["total_token"] = response.usage.total_tokens
+return result, True
count += 1
sleep(2)
-return error, True
+return result, True
def encode_image(image_path: str):
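A minimal sketch of the calling convention this commit introduces for chat(): the prompt is now passed as a keyword argument, the optional system_prompt is prepended as a system message, and the result is a dict carrying the answer plus token usage:

```python
result, with_error = chat(
    prompt="Summarise the fund table as JSON.",
    system_prompt="Answer in JSON format only.",
    response_format={"type": "json_object"},
)
answer = result.get("response", "")
tokens_used = result.get("total_token", 0)
```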