"""
Large Language Model (LLM) clients.

.. note::
    As a part of the development process, when choosing models it is
    important to research and understand the models you are using.

    A good example of why this is important can be found in the
    description of the Phi-3 model from Microsoft, which discusses
    responsible AI considerations such as the limitations of the
    model and appropriate use cases.
    https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf#responsible-ai-considerations
"""


class ModelClient:
    """
    A base class that defines the interface for language model clients.
    Subclasses are expected to implement the :meth:`process` method.
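
    A minimal sketch of a custom client (the ``EchoClient`` name and its
    trivial behavior are illustrative only, not part of the library):

    .. code-block:: python

        class EchoClient(ModelClient):

            def process(self, statement):
                # Echo the input text back instead of calling a model
                return statement.text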
    """

    def __init__(self, chatbot, model: str, **kwargs):
        self.chatbot = chatbot
        self.model = model


class Ollama(ModelClient):
    """
    This client class allows the use of Ollama models for chatbot responses.

    .. warning::
        This is a new and experimental class. It may not work as expected
        and its functionality may change in future releases.

    .. note::
        Added in version 1.2.7
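
    Example usage (a minimal sketch; it assumes a running Ollama server,
    a ``chatbot`` object with a ``stream`` attribute, and a ``statement``
    with a ``text`` attribute, as used by :meth:`process` below):

    .. code-block:: python

        client = Ollama(chatbot, model='phi3', host='http://localhost:11434')

        # Returns a string when chatbot.stream is False,
        # otherwise a generator of response chunks
        response = client.process(statement)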
    """

    def __init__(self, chatbot, model: str, **kwargs):
        """
        Keyword arguments:
            host: The host URL for the Ollama server.
                Default is 'http://localhost:11434'.
        """
        super().__init__(chatbot, model, **kwargs)
        from ollama import Client, AsyncClient

        self.host = kwargs.get('host', 'http://localhost:11434')

        # TODO: Look into supporting the async client
        self.async_mode = False

        # https://github.com/ollama/ollama-python
        if self.async_mode:
            self.client = AsyncClient(
                host=self.host,
            )
        else:
            self.client = Client(
                host=self.host,
            )

    def process(self, statement):
        system_message = {
            'role': 'system',
            'content': 'Please keep responses short and concise.'
        }
        message = {
            'role': 'user',
            'content': statement.text
        }

        # Streaming is delegated to a separate generator method so that
        # the non-streaming case can return a plain string (a single
        # method mixing ``yield`` and ``return value`` would always
        # return a generator, even when streaming is disabled)
        if self.chatbot.stream:
            return self._process_stream([system_message, message])

        response = self.client.chat(
            model=self.model,
            messages=[system_message, message]
        )

        return response['message']['content']

    def _process_stream(self, messages):
        """
        Yield the content of each chunk of a streaming chat response.
        """
        for part in self.client.chat(
            model=self.model,
            messages=messages,
            stream=True
        ):
            yield part['message']['content']


class OpenAI(ModelClient):
    """
    This client class allows the use of the OpenAI API to generate
    chatbot responses.

    .. warning::
        This is a new and experimental class. It may not work as expected
        and its functionality may change in future releases.

    .. note::
        Added in version 1.2.7
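
    Example usage (a minimal sketch; it assumes the ``OPENAI_API_KEY``
    environment variable is set, which the OpenAI client reads by
    default, and that ``chatbot`` and ``statement`` are as described
    for the ``Ollama`` client above):

    .. code-block:: python

        client = OpenAI(chatbot, model='gpt-4o-mini')

        response = client.process(statement)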
    """

    def __init__(self, chatbot, model: str, **kwargs):
        """
        Keyword arguments:
            host: An optional base URL to use for API requests.
                Defaults to the official OpenAI API when unset.
        """
        super().__init__(chatbot, model, **kwargs)
        from openai import OpenAI as OpenAIClient
        from openai import AsyncOpenAI as AsyncOpenAIClient

        self.host = kwargs.get('host', None)

        # TODO: Look into supporting the async client
        self.async_mode = False

        # https://github.com/openai/openai-python
        if self.async_mode:
            self.client = AsyncOpenAIClient(
                base_url=self.host,
            )
        else:
            self.client = OpenAIClient(
                base_url=self.host,
            )

    def process(self, statement):
        system_message = {
            # The 'developer' role supersedes the 'system' role
            # in newer OpenAI models
            'role': 'developer',
            'content': 'Please keep responses short and concise.'
        }
        message = {
            'role': 'user',
            'content': statement.text
        }

        # As with the Ollama client, streaming is delegated to a
        # generator method so this method can return a plain string
        # when streaming is disabled
        if self.chatbot.stream:
            return self._process_stream([system_message, message])

        response = self.client.chat.completions.create(
            model=self.model,
            messages=[system_message, message]
        )

        return response.choices[0].message.content

    def _process_stream(self, messages):
        """
        Yield the content of each chunk of a streaming chat completion.
        """
        for part in self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            stream=True
        ):
            # The delta content can be None (e.g. on the final chunk)
            yield part.choices[0].delta.content or ''