|
|
|
@ -32,9 +32,9 @@ class LLM:
|
|
|
|
|
return llm
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test_Qwen(self, question="如何应对压力?", model_path="Qwen/Qwen-1_8B-Chat"):
    """Smoke-test the locally-loaded Qwen model.

    Instantiates ``Qwen`` with this wrapper's mode and the given weights
    path, generates one answer for *question*, prints it, and returns it.

    Args:
        question: Prompt sent to the model (default is a Chinese sample).
        model_path: HuggingFace id or local path of the Qwen checkpoint.

    Returns:
        The generated answer string.
    """
    # NOTE(review): assumes self.mode was set by the enclosing LLM
    # wrapper's constructor — confirm against the class __init__.
    llm = Qwen(self.mode, model_path)
    answer = llm.generate(question)
    # Fix: the result was previously discarded; print it like the other
    # test_* helpers do, and return it so callers can inspect it.
    print(answer)
    return answer
|
def test_Qwen(self, question="如何应对压力?", model_path="Qwen/Qwen-1_8B-Chat", api_key=None, proxy_url=None):
    """Smoke-test Qwen through its API-style constructor.

    Builds a ``Qwen`` client from the given checkpoint path, optional API
    key and optional proxy base URL, sends *question* through ``chat``,
    and prints the reply.

    Args:
        question: Prompt to send (default is a Chinese sample question).
        model_path: HuggingFace id or local path of the Qwen checkpoint.
        api_key: API key forwarded to the client; may be None.
        proxy_url: OpenAI-compatible endpoint base URL; may be None.
    """
    qwen_client = Qwen(
        model_path=model_path,
        api_key=api_key,
        api_base=proxy_url,
    )
    reply = qwen_client.chat(question)
    print(reply)
|
def test_Gemini(self, question="如何应对压力?", model_path='gemini-pro', api_key=None, proxy_url=None):
|
|
|
|
@ -43,10 +43,12 @@ class LLM:
|
|
|
|
|
print(answer)
|
|
|
|
|
|
|
|
|
|
if __name__ == '__main__':
    # Demo driver: exercise the LLM wrapper with a vLLM-served model and
    # the Qwen proxy smoke test.
    llm = LLM()

    # Example: Gemini (requires a real API key; proxy is optional).
    # llm.test_Gemini(api_key='你的API Key', proxy_url=None)

    # Fix: the original rebound `llm` to the model object returned by
    # init_model() and then called llm.test_Qwen(...) on it — but
    # test_Qwen is a method of LLM, not of the model, so that call would
    # raise AttributeError. Keep the model in its own variable instead.
    model = llm.init_model('VllmGPT', model_path='THUDM/chatglm3-6b')
    response = model.chat("如何应对压力?")
    print(response)

    # Smoke-test Qwen through an OpenAI-compatible proxy endpoint.
    llm.test_Qwen(api_key="none", proxy_url="http://10.1.1.113:18000/v1")