Appearance
第70天:国外平台深度对比
学习目标
- 了解OpenAI ChatGPT
- 掌握Google Gemini
- 学习Anthropic Claude
- 理解Meta LLaMA
- 掌握平台特点对比
OpenAI ChatGPT
平台概述
python
class OpenAIChatGPT:
    """Minimal REST client for the OpenAI API (chat, embeddings, image generation).

    Only `requests` and an API key are needed; each method returns the parsed
    JSON response without raising on HTTP error statuses (callers inspect the
    returned payload for API errors).
    """

    def __init__(self, api_key: str):
        self.api_key = api_key
        # Chat-completions endpoint; embeddings/images use their own URLs below.
        self.base_url = "https://api.openai.com/v1/chat/completions"

    def _headers(self) -> Dict:
        # Shared bearer-token headers for every endpoint (was duplicated per method).
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

    def chat(self, messages: List[Dict],
             model: str = "gpt-4o") -> Dict:
        """Send a non-streaming chat completion request; return the parsed JSON."""
        import requests
        payload = {
            "model": model,
            "messages": messages
        }
        response = requests.post(
            self.base_url,
            json=payload,
            headers=self._headers()
        )
        return response.json()

    def stream_chat(self, messages: List[Dict],
                    model: str = "gpt-4o"):
        """Yield raw server-sent-event lines from a streaming chat completion."""
        import requests
        payload = {
            "model": model,
            "messages": messages,
            "stream": True
        }
        response = requests.post(
            self.base_url,
            json=payload,
            headers=self._headers(),
            stream=True
        )
        for line in response.iter_lines():
            if line:  # skip SSE keep-alive blank lines
                yield line.decode("utf-8")

    def create_embedding(self, text: str,
                         model: str = "text-embedding-3-small") -> Dict:
        """Create an embedding vector for *text*; return the parsed JSON."""
        import requests
        payload = {
            "model": model,
            "input": text
        }
        response = requests.post(
            "https://api.openai.com/v1/embeddings",
            json=payload,
            headers=self._headers()
        )
        return response.json()

    def create_image(self, prompt: str,
                     model: str = "dall-e-3") -> Dict:
        """Generate one 1024x1024 image from *prompt*; return the parsed JSON."""
        import requests
        payload = {
            "model": model,
            "prompt": prompt,
            "n": 1,
            "size": "1024x1024"
        }
        response = requests.post(
            "https://api.openai.com/v1/images/generations",
            json=payload,
            headers=self._headers()
        )
        return response.json()

# 平台特点 (platform features)
python
class OpenAIPlatformFeatures:
    """Static scorecard (1-5 per dimension) describing the OpenAI platform."""

    def __init__(self):
        # Feature name -> {description, detail payload, score}.
        self.features = {
            "model_variety": {
                "description": "模型多样性",
                "models": [
                    "GPT-4o",
                    "GPT-4-Turbo",
                    "GPT-3.5-Turbo",
                    "GPT-4-Vision",
                    "DALL-E-3"
                ],
                "score": 5
            },
            "performance": {
                "description": "性能表现",
                "metrics": {
                    "response_time": "< 1s",
                    "accuracy": "极高",
                    "context_length": "128K"
                },
                "score": 5
            },
            "cost": {
                "description": "成本",
                "pricing": {
                    "GPT-4o": "$0.005/1K tokens",
                    "GPT-4-Turbo": "$0.01/1K tokens",
                    "GPT-3.5-Turbo": "$0.0005/1K tokens"
                },
                "score": 3
            },
            "ecosystem": {
                "description": "生态支持",
                "features": [
                    "SDK支持(Python, Node.js等)",
                    "API文档完善",
                    "社区极其活跃",
                    "企业级支持"
                ],
                "score": 5
            },
            "compliance": {
                "description": "合规性",
                "certifications": [
                    "SOC2",
                    "ISO27001",
                    "GDPR"
                ],
                "score": 4
            }
        }

    def get_feature(self, feature_name: str) -> Dict:
        """Return one feature entry, or {} for an unknown name."""
        return self.features.get(feature_name, {})

    def get_all_features(self) -> Dict:
        """Return the full feature mapping (the live dict, not a copy)."""
        return self.features

    def calculate_overall_score(self) -> float:
        """Arithmetic mean of the per-dimension scores."""
        scores = [feature["score"] for feature in self.features.values()]
        return sum(scores) / len(scores)

# Google Gemini
平台概述
python
class GoogleGemini:
    """Minimal REST client for the Google Gemini generateContent API.

    Accepts OpenAI-style message dicts ({"role", "content"}) and converts them
    to Gemini's "contents" format. Each method returns the parsed JSON.
    """

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://generativelanguage.googleapis.com/v1beta/models"

    @staticmethod
    def _to_contents(messages: List[Dict]) -> List[Dict]:
        # Convert {"role", "content"} messages into Gemini "contents" entries
        # (was duplicated in chat/stream_chat).
        return [
            {
                "role": message["role"],
                "parts": [{"text": message["content"]}]
            }
            for message in messages
        ]

    def chat(self, messages: List[Dict],
             model: str = "gemini-1.5-pro") -> Dict:
        """Send a non-streaming generateContent request; return the parsed JSON."""
        import requests
        # NOTE(review): the key rides in the query string and can leak into
        # logs/proxies; the x-goog-api-key header is the safer alternative.
        url = f"{self.base_url}/{model}:generateContent?key={self.api_key}"
        headers = {
            "Content-Type": "application/json"
        }
        payload = {"contents": self._to_contents(messages)}
        response = requests.post(url, json=payload, headers=headers)
        return response.json()

    def stream_chat(self, messages: List[Dict],
                    model: str = "gemini-1.5-pro"):
        """Yield raw response lines from a streaming generateContent request."""
        import requests
        url = f"{self.base_url}/{model}:streamGenerateContent?key={self.api_key}"
        headers = {
            "Content-Type": "application/json"
        }
        payload = {"contents": self._to_contents(messages)}
        response = requests.post(url, json=payload, headers=headers, stream=True)
        for line in response.iter_lines():
            if line:
                yield line.decode("utf-8")

    def create_embedding(self, text: str,
                         model: str = "embedding-001") -> Dict:
        """Create an embedding for *text* via embedContent; return the parsed JSON."""
        import requests
        url = f"{self.base_url}/{model}:embedContent?key={self.api_key}"
        headers = {
            "Content-Type": "application/json"
        }
        payload = {
            "content": {
                "parts": [{"text": text}]
            }
        }
        response = requests.post(url, json=payload, headers=headers)
        return response.json()

# 平台特点 (platform features)
python
class GooglePlatformFeatures:
    """Static scorecard (1-5 per dimension) describing the Google Gemini platform."""

    def __init__(self):
        # Feature name -> {description, detail payload, score}.
        self.features = {
            "model_variety": {
                "description": "模型多样性",
                "models": [
                    "Gemini-1.5-Pro",
                    "Gemini-1.5-Flash",
                    "Gemini-1.0-Pro",
                    "Gemini-Ultra"
                ],
                "score": 5
            },
            "performance": {
                "description": "性能表现",
                "metrics": {
                    "response_time": "< 1s",
                    "accuracy": "极高",
                    "context_length": "1M"
                },
                "score": 5
            },
            "cost": {
                "description": "成本",
                "pricing": {
                    "Gemini-1.5-Pro": "$0.0035/1K tokens",
                    "Gemini-1.5-Flash": "$0.00015/1K tokens",
                    "Gemini-1.0-Pro": "$0.0005/1K tokens"
                },
                "score": 4
            },
            "ecosystem": {
                "description": "生态支持",
                "features": [
                    "SDK支持(Python, Node.js等)",
                    "API文档完善",
                    "社区活跃",
                    "企业级支持"
                ],
                "score": 5
            },
            "compliance": {
                "description": "合规性",
                "certifications": [
                    "SOC2",
                    "ISO27001",
                    "GDPR"
                ],
                "score": 4
            }
        }

    def get_feature(self, feature_name: str) -> Dict:
        """Return one feature entry, or {} for an unknown name."""
        return self.features.get(feature_name, {})

    def get_all_features(self) -> Dict:
        """Return the full feature mapping (the live dict, not a copy)."""
        return self.features

    def calculate_overall_score(self) -> float:
        """Arithmetic mean of the per-dimension scores."""
        scores = [feature["score"] for feature in self.features.values()]
        return sum(scores) / len(scores)

# Anthropic Claude
平台概述
python
class AnthropicClaude:
    """Minimal REST client for the Anthropic Messages API."""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://api.anthropic.com/v1/messages"

    def _headers(self) -> Dict:
        # Anthropic auth uses x-api-key plus a pinned API version header
        # (was duplicated in chat/stream_chat).
        return {
            "x-api-key": self.api_key,
            "anthropic-version": "2023-06-01",
            "Content-Type": "application/json"
        }

    def chat(self, messages: List[Dict],
             model: str = "claude-3-5-sonnet-20241022") -> Dict:
        """Send a non-streaming message request; return the parsed JSON."""
        import requests
        payload = {
            "model": model,
            "max_tokens": 4096,  # the Messages API requires an explicit max_tokens
            "messages": messages
        }
        response = requests.post(
            self.base_url,
            json=payload,
            headers=self._headers()
        )
        return response.json()

    def stream_chat(self, messages: List[Dict],
                    model: str = "claude-3-5-sonnet-20241022"):
        """Yield raw server-sent-event lines from a streaming message request."""
        import requests
        payload = {
            "model": model,
            "max_tokens": 4096,
            "messages": messages,
            "stream": True
        }
        response = requests.post(
            self.base_url,
            json=payload,
            headers=self._headers(),
            stream=True
        )
        for line in response.iter_lines():
            if line:  # skip SSE keep-alive blank lines
                yield line.decode("utf-8")

# 平台特点 (platform features)
python
class AnthropicPlatformFeatures:
    """Static scorecard (1-5 per dimension) describing the Anthropic platform."""

    def __init__(self):
        # Feature name -> {description, detail payload, score}.
        self.features = {
            "model_variety": {
                "description": "模型多样性",
                "models": [
                    "Claude-3.5-Sonnet",
                    "Claude-3.5-Haiku",
                    "Claude-3-Opus",
                    "Claude-3-Sonnet"
                ],
                "score": 4
            },
            "performance": {
                "description": "性能表现",
                "metrics": {
                    "response_time": "< 1s",
                    "accuracy": "极高",
                    "context_length": "200K"
                },
                "score": 5
            },
            "cost": {
                "description": "成本",
                "pricing": {
                    "Claude-3.5-Sonnet": "$0.003/1K tokens",
                    "Claude-3.5-Haiku": "$0.0008/1K tokens",
                    "Claude-3-Opus": "$0.015/1K tokens"
                },
                "score": 4
            },
            "ecosystem": {
                "description": "生态支持",
                "features": [
                    "SDK支持(Python, TypeScript等)",
                    "API文档完善",
                    "社区活跃",
                    "企业级支持"
                ],
                "score": 4
            },
            "compliance": {
                "description": "合规性",
                "certifications": [
                    "SOC2",
                    "ISO27001",
                    "GDPR"
                ],
                "score": 5
            }
        }

    def get_feature(self, feature_name: str) -> Dict:
        """Return one feature entry, or {} for an unknown name."""
        return self.features.get(feature_name, {})

    def get_all_features(self) -> Dict:
        """Return the full feature mapping (the live dict, not a copy)."""
        return self.features

    def calculate_overall_score(self) -> float:
        """Arithmetic mean of the per-dimension scores."""
        scores = [feature["score"] for feature in self.features.values()]
        return sum(scores) / len(scores)

# Meta LLaMA
平台概述
python
class MetaLLaMA:
    """Minimal REST client for an OpenAI-compatible LLaMA chat endpoint.

    NOTE(review): "https://api.meta.com" appears illustrative — Meta publishes
    LLaMA as open weights and it is typically served via third-party hosts
    (Together, Groq, Bedrock, ...). Confirm the real endpoint before use.
    """

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://api.meta.com/v1/chat/completions"

    def _headers(self) -> Dict:
        # Shared bearer-token headers (was duplicated in chat/stream_chat).
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

    def chat(self, messages: List[Dict],
             model: str = "llama-3.1-405b") -> Dict:
        """Send a non-streaming chat completion request; return the parsed JSON."""
        import requests
        payload = {
            "model": model,
            "messages": messages
        }
        response = requests.post(
            self.base_url,
            json=payload,
            headers=self._headers()
        )
        return response.json()

    def stream_chat(self, messages: List[Dict],
                    model: str = "llama-3.1-405b"):
        """Yield raw server-sent-event lines from a streaming chat completion."""
        import requests
        payload = {
            "model": model,
            "messages": messages,
            "stream": True
        }
        response = requests.post(
            self.base_url,
            json=payload,
            headers=self._headers(),
            stream=True
        )
        for line in response.iter_lines():
            if line:  # skip SSE keep-alive blank lines
                yield line.decode("utf-8")

# 平台特点 (platform features)
python
class MetaPlatformFeatures:
    """Static scorecard (1-5 per dimension) describing the Meta LLaMA platform."""

    def __init__(self):
        # Feature name -> {description, detail payload, score}.
        self.features = {
            "model_variety": {
                "description": "模型多样性",
                "models": [
                    "LLaMA-3.1-405B",
                    "LLaMA-3.1-70B",
                    "LLaMA-3.1-8B",
                    "LLaMA-3-70B"
                ],
                "score": 4
            },
            "performance": {
                "description": "性能表现",
                "metrics": {
                    "response_time": "< 1s",
                    "accuracy": "高",
                    "context_length": "128K"
                },
                "score": 4
            },
            "cost": {
                "description": "成本",
                "pricing": {
                    "LLaMA-3.1-405B": "$0.006/1K tokens",
                    "LLaMA-3.1-70B": "$0.001/1K tokens",
                    "LLaMA-3.1-8B": "$0.0002/1K tokens"
                },
                "score": 4
            },
            "ecosystem": {
                "description": "生态支持",
                "features": [
                    "开源模型",
                    "社区活跃",
                    "企业级支持"
                ],
                "score": 4
            },
            "compliance": {
                "description": "合规性",
                "certifications": [
                    "SOC2",
                    "ISO27001",
                    "GDPR"
                ],
                "score": 4
            }
        }

    def get_feature(self, feature_name: str) -> Dict:
        """Return one feature entry, or {} for an unknown name."""
        return self.features.get(feature_name, {})

    def get_all_features(self) -> Dict:
        """Return the full feature mapping (the live dict, not a copy)."""
        return self.features

    def calculate_overall_score(self) -> float:
        """Arithmetic mean of the per-dimension scores."""
        scores = [feature["score"] for feature in self.features.values()]
        return sum(scores) / len(scores)

# 平台对比 (platform comparison)
综合对比
python
class InternationalPlatformComparator:
    """Compare the four international AI platforms across their feature scorecards."""

    def __init__(self):
        # Platform id -> display name + its feature-scorecard object.
        self.platforms = {
            "openai": {
                "name": "OpenAI ChatGPT",
                "features": OpenAIPlatformFeatures()
            },
            "google": {
                "name": "Google Gemini",
                "features": GooglePlatformFeatures()
            },
            "anthropic": {
                "name": "Anthropic Claude",
                "features": AnthropicPlatformFeatures()
            },
            "meta": {
                "name": "Meta LLaMA",
                "features": MetaPlatformFeatures()
            }
        }

    def compare_platforms(self) -> Dict:
        """Return {platform_id: {name, overall_score, features}} for every platform."""
        comparison = {}
        for platform_id, platform_info in self.platforms.items():
            features = platform_info["features"]
            comparison[platform_id] = {
                "name": platform_info["name"],
                "overall_score": features.calculate_overall_score(),
                "features": features.get_all_features()
            }
        return comparison

    def compare_by_feature(self, feature_name: str) -> Dict:
        """Return {platform_id: {name, feature}} for a single feature dimension."""
        comparison = {}
        for platform_id, platform_info in self.platforms.items():
            comparison[platform_id] = {
                "name": platform_info["name"],
                "feature": platform_info["features"].get_feature(feature_name)
            }
        return comparison

    def rank_platforms(self) -> List[Dict]:
        """Return platforms as a list of dicts sorted by overall score, best first."""
        comparison = self.compare_platforms()
        ranked = sorted(
            comparison.items(),
            key=lambda item: item[1]["overall_score"],
            reverse=True
        )
        return [
            {
                "platform_id": platform_id,
                **platform_info
            }
            for platform_id, platform_info in ranked
        ]

    def generate_comparison_report(self) -> str:
        """Render a human-readable (Chinese) ranking plus per-platform score report."""
        comparison = self.compare_platforms()
        # rank_platforms() recomputes comparison internally; harmless for this
        # static data, but worth knowing if the scorecards ever become dynamic.
        ranked = self.rank_platforms()
        report = f"""
国外AI平台对比报告
平台排名:
"""
        for i, platform in enumerate(ranked, 1):
            report += f"""
{i}. {platform['name']} - 综合得分: {platform['overall_score']:.2f}
"""
        report += """
详细对比:
"""
        for platform_id, platform_info in comparison.items():
            report += f"""
{platform_info['name']}:
- 综合得分: {platform_info['overall_score']:.2f}
- 模型多样性: {platform_info['features']['model_variety']['score']}
- 性能表现: {platform_info['features']['performance']['score']}
- 成本: {platform_info['features']['cost']['score']}
- 生态支持: {platform_info['features']['ecosystem']['score']}
- 合规性: {platform_info['features']['compliance']['score']}
"""
        return report

# 实践练习 (hands-on exercises)
练习1:调用OpenAI ChatGPT
python
def call_openai_gpt(api_key, messages):
    """Exercise 1: send *messages* through the OpenAI chat endpoint (gpt-4o)."""
    client = OpenAIChatGPT(api_key)
    response = client.chat(messages, model="gpt-4o")
    return response

# 练习2:调用Google Gemini
python
def call_google_gemini(api_key, messages):
    """Exercise 2: send *messages* through the Gemini endpoint (gemini-1.5-pro)."""
    client = GoogleGemini(api_key)
    response = client.chat(messages, model="gemini-1.5-pro")
    return response

# 练习3:对比国外平台
python
def compare_international_platforms():
    """Exercise 3: return (comparison dict, ranked list, text report) for all platforms."""
    comparator = InternationalPlatformComparator()
    comparison = comparator.compare_platforms()
    ranked = comparator.rank_platforms()
    report = comparator.generate_comparison_report()
    return comparison, ranked, report

# 总结 (summary)
本节我们学习了国外平台深度对比:
- OpenAI ChatGPT平台
- Google Gemini平台
- Anthropic Claude平台
- Meta LLaMA平台
- 平台特点对比
国外平台各有优势,需要根据具体需求选择。
