Migrate from the PaLM API to the Gemini API

This guide shows how to migrate your Python code from the PaLM API to the Gemini API. You can generate both text and multi-turn (chat) conversations with Gemini, but be sure to review your responses, as they may differ from PaLM's output.

Summary of API differences

  1. The method names have changed. Instead of separate methods for text generation and chat, there is a single generate_content method that can do both.
  2. Chat has a start_chat helper method that makes chatting simpler.
  3. Instead of standalone functions, the new APIs are methods of the GenerativeModel class.
  4. The structure of the output response has changed.
  5. The safety setting categories have changed. See the safety settings guide for details. (A short sketch illustrating points 3 and 5 follows this list.)
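
As a rough illustration of points 3 and 5, the sketch below calls generate_content as a method on a GenerativeModel instance and passes Gemini-style safety settings. The specific category and threshold are placeholders chosen for illustration; consult the safety settings guide for the full set of values.

import os
import google.generativeai as genai
from google.generativeai.types import HarmBlockThreshold, HarmCategory

genai.configure(api_key=os.environ['API_KEY'])

# The new calls are methods on a GenerativeModel instance (point 3).
model = genai.GenerativeModel(model_name='gemini-pro')

# Safety settings now use Gemini harm categories and block thresholds
# (point 5); this particular category/threshold pair is only an example.
response = model.generate_content(
    'Write a one-line greeting.',
    safety_settings={
        HarmCategory.HARM_CATEGORY_HARASSMENT:
            HarmBlockThreshold.BLOCK_ONLY_HIGH,
    },
)
print(response.text)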

Text generation: Basic

PaLM
pip install google-generativeai

import google.generativeai as palm
import os

palm.configure(
    api_key=os.environ['API_KEY'])

response = palm.generate_text(
    prompt="The opposite of hot is")
print(response.result) #  'cold.'

Gemini
pip install google-generativeai

import google.generativeai as genai
import os

genai.configure(
    api_key=os.environ['API_KEY'])
model = genai.GenerativeModel(
    model_name='gemini-pro')

response = model.generate_content(
          'The opposite of hot is')
print(response.text)
    #  'The opposite of hot is cold.'
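
Note that the response objects have different shapes (point 4 in the summary above). A minimal sketch, reusing the model object from the Gemini snippet above, of the main accessors on a Gemini response:

response = model.generate_content('The opposite of hot is')

# Convenience accessor for the first candidate's text.
print(response.text)

# The underlying structure: a list of candidates, each holding content parts.
print(response.candidates[0].content.parts[0].text)

# Feedback on the prompt itself, e.g. whether a safety filter blocked it.
print(response.prompt_feedback)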
        

Text generation: Optional parameters

PaLM
pip install google-generativeai

import google.generativeai as palm
import os

palm.configure(
    api_key=os.environ['API_KEY'])

prompt = """
You are an expert at solving word
problems.

Solve the following problem:

I have three houses, each with three
cats. Each cat owns 4 mittens, and a hat.
Each mitten was knit from 7m of yarn,
each hat from 4m. How much yarn was
needed to make all the items?

Think about it step by step, and show
your work.
"""

completion = palm.generate_text(
    model='models/text-bison-001',  # the PaLM text model
    prompt=prompt,
    temperature=0,
    # The maximum length of response
    max_output_tokens=800,
)

print(completion.result)

Gemini
pip install google-generativeai

import google.generativeai as genai
import os

genai.configure(
    api_key=os.environ['API_KEY'])
model = genai.GenerativeModel(
    model_name='gemini-pro')

prompt = """
You are an expert at solving word
problems.

Solve the following problem:

I have three houses, each with three
cats. Each cat owns 4 mittens, and a hat.
Each mitten was knit from 7m of yarn,
each hat from 4m. How much yarn was
needed to make all the items?

Think about it step by step, and show
your work.
"""

completion = model.generate_content(
    prompt,
    generation_config={
        'temperature': 0,
        'max_output_tokens': 800
    }
)

print(completion.text)
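
If you prefer a typed object over a plain dict, generation_config also accepts a GenerationConfig instance from google.generativeai.types. The sketch below is intended to be equivalent to the dict used above:

completion = model.generate_content(
    prompt,
    generation_config=genai.types.GenerationConfig(
        temperature=0,
        max_output_tokens=800,
    ),
)

print(completion.text)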
        

Chat: Basic

PaLM
pip install google-generativeai

import google.generativeai as palm
import os

palm.configure(
    api_key=os.environ['API_KEY'])

chat = palm.chat(
    messages=["Hello."])
print(chat.last)
    #  'Hello! What can I help you with?'
chat = chat.reply(
    "Just chillin'")
print(chat.last)
    #  'That's great! ...'

Gemini
pip install google-generativeai

import google.generativeai as genai
import os

genai.configure(
    api_key=os.environ['API_KEY'])
model = genai.GenerativeModel(
    model_name='gemini-pro')
chat = model.start_chat()

response = chat.send_message(
          "Hello.")
print(response.text)
response = chat.send_message(
          "Just chillin'")
print(response.text)
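
With PaLM, prior turns were supplied as a messages list to palm.chat; with Gemini, a similar effect can be achieved by seeding start_chat with a history argument. A minimal sketch (the seeded turns below are illustrative):

# Seed the chat with earlier turns; roles alternate between 'user' and 'model'.
chat = model.start_chat(history=[
    {'role': 'user', 'parts': ["Hello."]},
    {'role': 'model', 'parts': ["Hello! What can I help you with?"]},
])

response = chat.send_message("Just chillin'")
print(response.text)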
        

Chat: Conversation history

PaLM
chat.messages

[{'author': '0', 'content': 'Hello'},
 {'author': '1', 'content': 'Hello! How can I help you today?'},
 {'author': '0', 'content': "Just chillin'"},
 {'author': '1',
  'content': "That's great! I'm glad you're able to relax and
      take some time for yourself. What are you up to today?"}]

Gemini
chat.history

[parts {
   text: "Hello."
 }
 role: "user",
 parts {
   text: "Greetings! How may I assist you today?"
 }
 role: "model",
 parts {
   text: "Just chillin\'"
 }
 role: "user",
 parts {
   text: "That\'s great! I\'m glad to hear
   you\'re having a relaxing time.
   May I offer you any virtual entertainment
   or assistance? I can provide
   you with music recommendations, play
   games with you, or engage in a
   friendly conversation.\n\nAdditionally,
   I\'m capable of generating
   creative content, such as poems, stories,
   or even song lyrics.
   If you\'d like, I can surprise you with
   something unique.\n\nJust
   let me know what you\'re in the mood for,
   and I\'ll be happy to oblige."
 }
 role: "model"]
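
Each entry in chat.history is a content object with a role and a list of parts, so the transcript can be printed with a short loop such as this sketch:

# Print the conversation as "role: text" lines.
for message in chat.history:
    print(f'{message.role}: {message.parts[0].text}')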
        

Chat: Temperature

PaLM
# Setting temperature=1 usually produces more zany responses!
chat = palm.chat(messages="What should I eat for dinner tonight? List a few options", temperature=1)
chat.last

'Here are a few ideas ...'

Gemini
model = genai.GenerativeModel(model_name='gemini-pro')
chat = model.start_chat()

# Setting temperature=1 usually produces more zany responses!
response = chat.send_message(
        "What should I eat for dinner tonight? List a few options",
          generation_config={
          'temperature': 1.0
        })

print(response.text)

'1. Grilled Salmon with Roasted Vegetables: ...'
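
If every request should use the same settings, the configuration can also be set once on the model instead of per call. A minimal sketch of that variant:

# Set a default temperature for all requests made with this model instance.
model = genai.GenerativeModel(
    model_name='gemini-pro',
    generation_config={'temperature': 1.0})

chat = model.start_chat()
response = chat.send_message(
    "What should I eat for dinner tonight? List a few options")
print(response.text)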
        

Next steps

  • For more details on the latest models and features, see the Gemini API overview.