Set the configurations first-

```python
system_prompt = None  # use the default
metadata = None  # use the default metadata
user_id = "Sam-Julia"  # combination of user name and assistant name is recommended
chat_model_name = "llama-3.1-70b-versatile"
memory_model_name = "llama-3.1-70b-versatile"
max_tokens = 150  # number of tokens to generate from the LLM
limit = 4  # maximum number of memories to add during LLM chat
debug = True  # enable to print debug messages

os.environ["GROQ_API_KEY"] = ""
```
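The example above sets `GROQ_API_KEY` to an empty string as a placeholder. In practice you would read the key from the environment and fail fast if it is missing; a minimal stdlib-only sketch (nothing chatformers-specific):

```python
import os

# Fail fast with a clear error instead of sending unauthenticated requests later
if not os.environ.get("GROQ_API_KEY"):
    raise RuntimeError("Set the GROQ_API_KEY environment variable before running this example")
```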
This example uses chromadb as the vector store, ollama as the embedding model, and groq as the LLM. You can use any other supported vector store, embedding model, or LLM; check the documentation for the full list-
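For illustration, an alternative setup might swap in a different provider at each layer. The provider names and config keys below are assumptions for the sketch, not confirmed values; verify them against the chatformers documentation before use:

```python
# Hypothetical alternative config - every provider/key below is an assumption,
# check the documentation for the exact supported options
config = {
    "vector_store": {
        "provider": "qdrant",  # assumed alternative vector store
        "config": {"collection_name": "test", "host": "localhost", "port": 6333}
    },
    "embedder": {
        "provider": "openai",  # assumed alternative embedding provider
        "config": {"model": "text-embedding-3-small"}
    },
    "llm": {
        "provider": "ollama",  # assumed alternative LLM provider
        "config": {"model": "llama3.1", "temperature": 0.1, "max_tokens": 1000}
    },
}
```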
```python
# Example to add buffer memory
memory_messages = [
    {"role": "user", "content": "My name is Sam, what about you?"},
    {"role": "assistant", "content": "Hello Sam! I'm Julia."},
    {"role": "user", "content": "What do you like to eat?"},
    {"role": "assistant", "content": "I like pizza"}
]
chatbot.add_memories(memory_messages, user_id=user_id)

# Buffer window memory; this acts as a sliding window of recent messages for the LLM
message_history = [
    {"role": "user", "content": "where r u from?"},
    {"role": "assistant", "content": "I am from CA, USA"},
    {"role": "user", "content": "ok"},
    {"role": "assistant", "content": "hmm"},
    {"role": "user", "content": "What are u doing on next Sunday?"},
    {"role": "assistant", "content": "I am all available"},
]

# Example to chat with the bot; send the latest / current query here
query = "Do you remember my name?"
response = chatbot.chat(query=query, message_history=message_history,
                        user_id=user_id, print_stream=True)
print("Assistant: ", response)

# Example to check memories in the bot based on user_id
memories = chatbot.get_memories(user_id=user_id)
for m in memories:
    print(m)
print("================================================================")
related_memories = chatbot.related_memory(user_id=user_id,
                                          query="yes i am sam? what is your name")
print(related_memories)
```
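`message_history` is a plain Python list, so the caller controls the window size. A minimal sketch of trimming it before a chat call; the window size of 6 is an arbitrary choice for illustration, not a library default:

```python
WINDOW_SIZE = 6  # keep only the 6 most recent messages (arbitrary choice)
trimmed_history = message_history[-WINDOW_SIZE:]
response = chatbot.chat(query=query, message_history=trimmed_history,
                        user_id=user_id, print_stream=True)
```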
Full example-

```python
from chatformers.chatbot import Chatbot
import os
from openai import OpenAI

system_prompt = None  # use the default
metadata = None  # use the default metadata
user_id = "Sam-Julia"
chat_model_name = "llama-3.1-70b-versatile"
memory_model_name = "llama-3.1-70b-versatile"
max_tokens = 150  # number of tokens to generate from the LLM
limit = 4  # maximum number of memories to add during LLM chat
debug = True  # enable to print debug messages

os.environ["GROQ_API_KEY"] = ""
llm_client = OpenAI(base_url="https://api.groq.com/openai/v1",
                    api_key="", )  # any OpenAI-compatible LLM client
config = {
    "vector_store": {
        "provider": "chroma",
        "config": {
            "collection_name": "test",
            "path": "db",
        }
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text:latest"
        }
    },
    "llm": {
        "provider": "groq",
        "config": {
            "model": memory_model_name,
            "temperature": 0.1,
            "max_tokens": 1000,
        }
    },
}
chatbot = Chatbot(config=config, llm_client=llm_client, metadata=None,
                  system_prompt=system_prompt, chat_model_name=chat_model_name,
                  memory_model_name=memory_model_name, max_tokens=max_tokens,
                  limit=limit, debug=debug)

# Example to add buffer memory
memory_messages = [
    {"role": "user", "content": "My name is Sam, what about you?"},
    {"role": "assistant", "content": "Hello Sam! I'm Julia."},
    {"role": "user", "content": "What do you like to eat?"},
    {"role": "assistant", "content": "I like pizza"}
]
chatbot.add_memories(memory_messages, user_id=user_id)

# Buffer window memory; this acts as a sliding window of recent messages for the LLM
message_history = [
    {"role": "user", "content": "where r u from?"},
    {"role": "assistant", "content": "I am from CA, USA"},
    {"role": "user", "content": "ok"},
    {"role": "assistant", "content": "hmm"},
    {"role": "user", "content": "What are u doing on next Sunday?"},
    {"role": "assistant", "content": "I am all available"},
]

# Example to chat with the bot; send the latest / current query here
query = "Could you remind me what do you like to eat?"
response = chatbot.chat(query=query, message_history=message_history,
                        user_id=user_id, print_stream=True)
print("Assistant: ", response)

# Example to check memories in the bot based on user_id
# memories = chatbot.get_memories(user_id=user_id)
# for m in memories:
#     print(m)
# print("================================================================")
# related_memories = chatbot.related_memory(user_id=user_id,
#                                           query="yes i am sam? what is your name")
# print(related_memories)
```
Output looks like-
```
INFO: USING BELOW GIVEN CONFIGS-
{'embedder': {'config': {'model': 'nomic-embed-text:latest'}, 'provider': 'ollama'},
 'llm': {'config': {'max_tokens': 1000, 'model': 'llama-3.1-70b-versatile', 'temperature': 0.1}, 'provider': 'groq'},
 'vector_store': {'config': {'collection_name': 'test', 'path': 'db'}, 'provider': 'chroma'}}
INFO: END OF CONFIGS
INFO: SYSTEM PROMPT-
You are a helpful assistant.
You have access of following memories from old conversation you had earlier. You can refer these if required-
Likes pizza
Name is Julia
Assistant: I like to eat pizza.

Process finished with exit code 0
```
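If you want to confirm the memories were persisted, you can open the Chroma store directly with the chromadb client. A minimal sketch, assuming the `"path": "db"` and `"collection_name": "test"` values from the config above (the library may name or shape the collection differently internally):

```python
import chromadb

# Open the on-disk store the example created at path "db"
client = chromadb.PersistentClient(path="db")
collection = client.get_collection("test")  # "collection_name" from the config
print(collection.count(), "records stored")
print(collection.peek())  # a peek at the first few stored records
```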