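"""Helpers for calling Aliyun DashScope Qwen models.

chat() sends a text prompt via dashscope.Generation, or a multimodal prompt via
dashscope.MultiModalConversation when an image path or base64 string is given,
retries up to 3 times, and returns a (result_dict, error_flag) tuple.
"""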
import requests
import json
import os
from bs4 import BeautifulSoup
import time
from time import sleep
from datetime import datetime
import pytz
import pandas as pd
import dashscope
import dotenv
import base64

dotenv.load_dotenv()

ali_api_key = os.getenv("ALI_API_KEY_QWEN")


def chat(
    prompt: str,
    text_model: str = "qwen-plus",
    image_model: str = "qwen-vl-plus",
    image_file: str = None,
    image_base64: str = None,
    enable_search: bool = False,
):
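    """Call an Aliyun DashScope Qwen model and return (result_dict, error_flag).

    When an image file or base64 string is supplied, image_model is called through
    the multimodal endpoint; otherwise text_model is called, optionally with web
    search enabled. Each API call is retried up to 3 times.
    """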
    try:
        token = 0
        # If only an image path was given, convert it to base64 first.
        if (
            image_base64 is None
            and image_file is not None
            and len(image_file) > 0
            and os.path.exists(image_file)
        ):
            image_base64 = encode_image(image_file)

        use_image_model = False
        if image_base64 is not None and len(image_base64) > 0:
            use_image_model = True
            # Multimodal message: the prompt text plus the image as a base64 data URL.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"text": prompt},
                        {
                            "image": f"data:image/png;base64,{image_base64}",
                        },
                    ],
                }
            ]
            count = 0
            while count < 3:
                try:
                    print(f"Calling Aliyun Qwen model, attempt {count + 1}")
                    response = dashscope.MultiModalConversation.call(
                        api_key=ali_api_key,
                        model=image_model,
                        messages=messages,
                    )
                    if response.status_code == 200:
                        break
                    else:
                        print(f"Aliyun Qwen model call failed: {response.code} {response.message}")
                        count += 1
                        sleep(2)
                except Exception as e:
                    print(f"Aliyun Qwen model call failed: {e}")
                    count += 1
                    sleep(2)
            if response.status_code == 200:
                image_text = (
                    response.get("output", {})
                    .get("choices", [])[0]
                    .get("message", {})
                    .get("content", "")
                )
                # The multimodal output is a list of parts; concatenate the text parts.
                temp_image_text = ""
                if isinstance(image_text, list):
                    for item in image_text:
                        if isinstance(item, dict):
                            temp_image_text += item.get("text", "") + "\n\n"
                        elif isinstance(item, str):
                            temp_image_text += item + "\n\n"
                        else:
                            pass
                response_contents = temp_image_text.strip()
                token = response.get("usage", {}).get("total_tokens", 0)
            else:
                response_contents = f"{response.code} {response.message} unable to analyze the image"
                token = 0
        else:
            messages = [{"role": "user", "content": prompt}]
            count = 0
            while count < 3:
                try:
                    print(f"Calling Aliyun Qwen model, attempt {count + 1}")
                    response = dashscope.Generation.call(
                        api_key=ali_api_key,
                        model=text_model,
                        messages=messages,
                        enable_search=enable_search,
                        search_options={"forced_search": enable_search},  # force web search when enabled
                        result_format="message",
                    )
                    if response.status_code == 200:
                        break
                    else:
                        print(f"Aliyun Qwen model call failed: {response.code} {response.message}")
                        count += 1
                        sleep(2)
                except Exception as e:
                    print(f"Aliyun Qwen model call failed: {e}")
                    count += 1
                    sleep(2)
            # Get the response text and token usage
            if response.status_code == 200:
                response_contents = (
                    response.get("output", {})
                    .get("choices", [])[0]
                    .get("message", {})
                    .get("content", "")
                )
                token = response.get("usage", {}).get("total_tokens", 0)
            else:
                response_contents = f"{response.code} {response.message}"
                token = 0
        result = {}
        if use_image_model:
            result["model"] = image_model
        else:
            result["model"] = text_model
        result["response"] = response_contents
        result["prompt_token"] = response.get("usage", {}).get("input_tokens", 0)
        result["completion_token"] = response.get("usage", {}).get("output_tokens", 0)
        result["total_token"] = token
        sleep(2)
        return result, False
    except Exception as e:
        print(f"Aliyun Qwen model call failed: {e}")
        return {}, True


def encode_image(image_path: str):
    """Read a local image file and return its base64-encoded contents, or None."""
    if image_path is None or len(image_path) == 0 or not os.path.exists(image_path):
        return None
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
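

# Example usage, as a minimal sketch: it assumes ALI_API_KEY_QWEN is set in the
# .env file and that "example.png" is a hypothetical local image path; chat()
# returns a (result_dict, error_flag) tuple.
if __name__ == "__main__":
    # Text-only call, routed to dashscope.Generation with web search enabled.
    result, failed = chat("Give a one-sentence summary of today's AI news.", enable_search=True)
    if not failed:
        print(result["model"], result["total_token"])
        print(result["response"])

    # Multimodal call, routed to dashscope.MultiModalConversation.
    result, failed = chat("Describe this image.", image_file="example.png")
    if not failed:
        print(result["response"])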