API 使用示例


from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Pisces LangChain demo: point ChatOpenAI at the Pisces-compatible endpoint.
api_key = "pisces-xxxx"
base_url = "https://api.pisces.ink"

# Configure the custom OpenAI-compatible API.
llm = ChatOpenAI(
    model_name="gpt-4o-mini",
    api_key=api_key,
    openai_api_base=f"{base_url}/v1",
    # Extra request headers must go through default_headers, not headers.
    default_headers={"Authorization": "Bearer Pisces"},
    streaming=True,
)

# Example 1: direct invocation.
# NOTE: calling the model like `llm(messages)` relies on the deprecated
# __call__ API in recent LangChain releases; `invoke` is the supported
# entry point and returns the same AIMessage.
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Hello!")
]
response = llm.invoke(messages)
print(response.content)

# Example 2: PromptTemplate + LLMChain.
template = """
You are a poetry expert. Write a {style} poem about {topic}.

Poem:
"""

prompt = PromptTemplate(
    input_variables=["style", "topic"],
    template=template
)

chain = LLMChain(llm=llm, prompt=prompt)

# Execute the chain. `chain.run(...)` is deprecated; `invoke` takes a dict
# of input variables and returns a dict whose "text" key holds the output,
# so the printed result is unchanged.
result = chain.invoke({"style": "haiku", "topic": "spring flowers"})["text"]
print(result)

                        

import requests
import json

# Pisces raw-HTTP (requests) demo: credentials and endpoint URLs.
api_key = "pisces-xxxx"
base_url = "https://api.pisces.ink"

# Derived endpoints for chat completions, image editing, and image generation.
main_url = f"{base_url}/v1/chat/completions"
image_edit_url = f"{base_url}/v1/images/edits"
image_gen_url = f"{base_url}/v1/images/generations"

# NOTE: The triple-quoted string below is commented-out example code (text
# chat, image/file upload, image generation, image editing). It is never
# executed. If any section is re-enabled, note that it uses `base64`, which
# is only imported further down in this file — move `import base64` above
# the re-enabled code first.
'''
# 文字对话:
response = requests.post(main_url, json={
    "messages": [
        {"role": "user", "content": "Hello"}
    ],
    "model": "gpt-4o-mini",  # 或其他模型名称
    "api_key": api_key
})

response = json.loads(response.text)
result_llm = response["choices"][0]["message"]["content"]
usage = response["usage"]["total_tokens"]
print(f"Response: \n{result_llm}\nUsage: {usage}")

# 图片上传:
response = requests.post(main_url, json={
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "你觉得这两个人谁更加帅气?"},
                {
                    "type": "image_url",
                    "image_url": "https://img2.baidu.com/it/u=429916013,2683831505&fm=253&fmt=auto&app=120&f=JPEG?w=500&h=750"
                },
                {
                    "type": "image_url",
                    "image_url": "https://img2.baidu.com/it/u=2478213317,2445871430&fm=253&fmt=auto&app=120&f=JPEG?w=500&h=779"
                }
            ]
        }
    ],
    "model": "gpt-4o-mini",  # 或其他模型名称
    "api_key": api_key
})

# 附件上传:
response = requests.post(main_url, json={
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "这篇文章讲了什么?"},
                {
                    "type": "file_url",
                    "file_url": "https://arxiv.org/pdf/2307.16789"
                }
            ]
        }
    ],
    "model": "gpt-4o-mini",  # 或其他模型名称
    "api_key": api_key
})


# 图片生成
result = requests.post(image_gen_url, json={
    "prompt": "Draw a rocket in front of a blackhole in deep space",
    "model": "FLUX-pro-1.1-ultra",
    "n": "1",
    "size": "1024x1024",
    "api_key": api_key
})
result = json.loads(result.text)
url = result["data"][0]["url"]
print(f"Response image url: {url}")
image_base64 = result["data"][0]["b64_json"]
image_bytes = base64.b64decode(image_base64)

# Save the image to a file
with open("blackhole.png", "wb") as f:
    f.write(image_bytes)



# 图片编辑
images = [
    f"data:image/png;base64,{base64.b64encode(open('doro.png', 'rb').read()).decode()}",
]

response = requests.post(image_edit_url, json={
    "prompt": "上传的doro狗手中举着一个牌子,上面写着'I'm Doro'",
    "image": images,
    "model": "GPT-Image-1",
    "n": "1",
    "size": "1024x1024",
    "api_key": api_key
})

response = json.loads(response.text)
try:
    result_llm = response["data"][0]["url"]
    print(f"Response image url: {result_llm}")
except:
    print(f"Error: {response}")
    result_llm = None

'''
if __name__ == "__main__":
    # Plain (non-streaming) chat completion.
    response = requests.post(main_url, json={
        "messages": [
            {"role": "user", "content": "Hello"}
        ],
        "model": "gpt-4o-mini",  # or any other model name
        "api_key": api_key
    })
    # Fail fast on HTTP errors instead of an opaque KeyError below.
    response.raise_for_status()

    payload = response.json()
    result_llm = payload["choices"][0]["message"]["content"]
    usage = payload["usage"]["total_tokens"]
    print(f"Response: \n{result_llm}\nUsage: {usage}")

    # Streaming chat completion (server-sent events).
    response = requests.post(main_url, json={
        "messages": [
            {"role": "user", "content": "write a short poem."}
        ],
        "model": "gpt-4o-mini",  # or any other model name
        "api_key": api_key,
        "stream": True
    }, stream=True)
    response.raise_for_status()

    # Accumulate the full response while printing chunks as they arrive.
    full_response, json_data = "", {}
    for raw_line in response.iter_lines():
        if not raw_line:
            continue
        line = raw_line.decode("utf-8")
        # Each event line looks like 'data: {...}'; anything else is ignored.
        if not line.startswith("data: "):
            continue
        if line.strip() == "data: [DONE]":  # end-of-stream sentinel
            break

        json_data = json.loads(line[len("data: "):])

        if json_data.get("choices"):
            delta = json_data["choices"][0].get("delta", {})
            content = delta.get("content")
            # Guard against explicit `"content": null` chunks, which the
            # original would have concatenated and crashed on.
            if content is not None:
                full_response += content
                print(content, end="", flush=True)  # print tokens in real time

    # Token usage, when the server includes it in the final data chunk.
    if "usage" in json_data:
        print("\nUsage:", json_data["usage"]["total_tokens"])
                        

import base64

import openai

# Pisces OpenAI-SDK demo: credentials and endpoint.
api_key = "pisces-xxxx"
base_url = "https://api.pisces.ink"

# OpenAI client pointed at the Pisces-compatible API.
client = openai.OpenAI(
    api_key=api_key,
    base_url=f"{base_url}/v1/",
)


# Text-to-text
model = "gpt-4o-mini"

# Non-streaming example: request the full completion in a single call.
chat_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
response = client.chat.completions.create(model=model, messages=chat_messages)
print(response.choices[0].message.content)


# Streaming example: print tokens as the server sends them.
stream = client.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": "write a short poem."}],
    stream=True,
)
for chunk in stream:
    delta_text = chunk.choices[0].delta.content
    if delta_text is not None:
        print(delta_text, end="")



# Text-to-image
# Image generation example.
image_model = "playground-v3"
result = client.images.generate(
    model=image_model,
    prompt="A cute kitten",
    n=1,  # number of images to generate
)
image_url = result.data[0].url
print(f"Image URL: {image_url}")


# Image edit example.
image_model = "GPT-Image-1"
# Open the source image in a context manager so the file handle is closed
# even if the API call raises (the original left the handle open).
with open('doro.png', 'rb') as source_image:
    result = client.images.edit(
        model=image_model,
        prompt="上传的doro狗手中举着一个牌子,上面写着'I'm Doro'",
        image=[source_image],  # the image to edit
        n=1,  # the number of images to generate
    )
# The edited image comes back base64-encoded; decode to raw bytes.
image_base64 = result.data[0].b64_json
image_bytes = base64.b64decode(image_base64)

# Save the image to a file
with open("doro_new.png", "wb") as f:
    f.write(image_bytes)

                        

API 参数

参数 类型 描述
user string 用于身份验证的用户名
api_key string Pisces API 密钥,用于身份验证
model string 使用的 AI 模型(例如:gpt-4o, claude-3.5-sonnet)
messages array 消息对象数组