update default model to llama3.2 (#6959)

Jeffrey Morgan
2024-09-25 11:11:22 -07:00
committed by GitHub
parent e9e9bdb8d9
commit 55ea963c9e
29 changed files with 102 additions and 100 deletions


@@ -35,7 +35,7 @@ func main() {
ctx := context.Background()
req := &api.ChatRequest{
-Model: "llama3.1",
+Model: "llama3.2",
Messages: messages,
}
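
For readers following the same change from Python, here is a minimal sketch of the equivalent chat request using the `ollama` Python client (an illustration, not part of this diff; assumes `pip install ollama` and a running server with `llama3.2` pulled):

```python
# Minimal sketch: the same chat request via the ollama Python client.
import ollama

response = ollama.chat(
    model="llama3.2",
    messages=[{"role": "user", "content": "why is the sky blue?"}],
)
print(response["message"]["content"])
```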


@@ -4,10 +4,10 @@ This example provides an interface for asking questions to a PDF document.
## Setup
-1. Ensure you have the `llama3.1` model installed:
+1. Ensure you have the `llama3.2` model installed:
```
-ollama pull llama3.1
+ollama pull llama3.2
```
2. Install the Python Requirements.


@@ -51,7 +51,7 @@ while True:
template=template,
)
-llm = Ollama(model="llama3.1", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
+llm = Ollama(model="llama3.2", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
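
Once the chain above is assembled, a question can be posed against the indexed PDF. A hedged sketch, assuming `qa_chain` is built as shown and the vector store already holds the document's chunks:

```python
# Ask the retrieval chain a question; RetrievalQA expects the "query" input key
# and returns its answer under the "result" output key.
question = "What is this document about?"
result = qa_chain({"query": question})
print(result["result"])
```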


@@ -4,10 +4,10 @@ This example summarizes the website, [https://ollama.com/blog/run-llama2-uncenso
## Running the Example
-1. Ensure you have the `llama3.1` model installed:
+1. Ensure you have the `llama3.2` model installed:
```bash
-ollama pull llama3.1
+ollama pull llama3.2
```
2. Install the Python Requirements.


@@ -5,7 +5,7 @@ from langchain.chains.summarize import load_summarize_chain
loader = WebBaseLoader("https://ollama.com/blog/run-llama2-uncensored-locally")
docs = loader.load()
-llm = Ollama(model="llama3.1")
+llm = Ollama(model="llama3.2")
chain = load_summarize_chain(llm, chain_type="stuff")
result = chain.invoke(docs)
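
The chain's result is a dictionary; assuming the standard `output_text` key produced by `load_summarize_chain`, the summary can be printed with:

```python
# Print the generated summary (assumes the chain's default "output_text" key).
print(result["output_text"])
```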


@@ -4,10 +4,10 @@ This example is a basic "hello world" of using LangChain with Ollama.
## Running the Example
-1. Ensure you have the `llama3.1` model installed:
+1. Ensure you have the `llama3.2` model installed:
```bash
-ollama pull llama3.1
+ollama pull llama3.2
```
2. Install the Python Requirements.


@@ -1,6 +1,6 @@
from langchain.llms import Ollama
input = input("What is your question?")
-llm = Ollama(model="llama3.1")
+llm = Ollama(model="llama3.2")
res = llm.predict(input)
print (res)


@@ -1,4 +1,4 @@
-FROM llama3.1
+FROM llama3.2
PARAMETER temperature 1
SYSTEM """
You are Mario from super mario bros, acting as an assistant.


@@ -2,12 +2,12 @@
# Example character: Mario
-This example shows how to create a basic character using Llama3.1 as the base model.
+This example shows how to create a basic character using Llama 3.2 as the base model.
To run this example:
1. Download the Modelfile
-2. `ollama pull llama3.1` to get the base model used in the model file.
+2. `ollama pull llama3.2` to get the base model used in the model file.
3. `ollama create NAME -f ./Modelfile`
4. `ollama run NAME`
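
Taken together, the steps above might look like this in a shell, using `mario` as a placeholder name for the new model:

```bash
ollama pull llama3.2
ollama create mario -f ./Modelfile
ollama run mario
```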
@@ -18,7 +18,7 @@ Ask it some questions like "Who are you?" or "Is Peach in trouble again?"
What the model file looks like:
```
-FROM llama3.1
+FROM llama3.2
PARAMETER temperature 1
SYSTEM """
You are Mario from Super Mario Bros, acting as an assistant.


@@ -1,14 +1,14 @@
# RAG Hallucination Checker using Bespoke-Minicheck
-This example allows the user to ask questions related to a document, which can be specified via an article URL. Relevant chunks are retrieved from the document and given to `llama3.1` as context to answer the question. Then each sentence in the answer is checked against the retrieved chunks using `bespoke-minicheck` to ensure that the answer does not contain hallucinations.
+This example allows the user to ask questions related to a document, which can be specified via an article URL. Relevant chunks are retrieved from the document and given to `llama3.2` as context to answer the question. Then each sentence in the answer is checked against the retrieved chunks using `bespoke-minicheck` to ensure that the answer does not contain hallucinations.
## Running the Example
-1. Ensure the `all-minilm` (embedding), `llama3.1` (chat), and `bespoke-minicheck` (check) models are installed:
+1. Ensure the `all-minilm` (embedding), `llama3.2` (chat), and `bespoke-minicheck` (check) models are installed:
```bash
ollama pull all-minilm
-ollama pull llama3.1
+ollama pull llama3.2
ollama pull bespoke-minicheck
```


@@ -119,7 +119,7 @@ if __name__ == "__main__":
system_prompt = f"Only use the following information to answer the question. Do not use anything else: {sourcetext}"
ollama_response = ollama.generate(
-model="llama3.1",
+model="llama3.2",
prompt=question,
system=system_prompt,
options={"stream": False},
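
The generate call returns the completion alongside metadata; a hedged follow-up for reading the answer, assuming the client's standard `response` field:

```python
# The generated answer text lives under the "response" key.
answer = ollama_response["response"]
print(answer)
```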


@@ -2,7 +2,7 @@ import requests
import json
import random
-model = "llama3.1"
+model = "llama3.2"
template = {
"firstName": "",
"lastName": "",

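A hedged sketch of one way such a template can be enforced, using the `ollama` Python client's JSON mode (`format="json"`); the `model` and `template` names are taken from the snippet above, everything else is illustrative:

```python
# Ask for output matching the template and parse it back into a dict.
import json
import ollama

prompt = (
    "Generate one sample person. Respond using JSON with exactly these keys: "
    + json.dumps(template)
)
reply = ollama.generate(model=model, prompt=prompt, format="json")
person = json.loads(reply["response"])
print(person)
```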

@@ -12,7 +12,7 @@ countries = [
"France",
]
country = random.choice(countries)
-model = "llama3.1"
+model = "llama3.2"
prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."


@@ -6,10 +6,10 @@ There are two python scripts in this example. `randomaddresses.py` generates ran
## Running the Example
-1. Ensure you have the `llama3.1` model installed:
+1. Ensure you have the `llama3.2` model installed:
```bash
-ollama pull llama3.1
+ollama pull llama3.2
```
2. Install the Python Requirements.


@@ -2,7 +2,7 @@ import json
import requests
# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
-model = "llama3.1" # TODO: update this for whatever model you wish to use
+model = "llama3.2" # TODO: update this for whatever model you wish to use
def chat(messages):
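
The rest of this file (outside the hunk) streams tokens from the REST API. A minimal non-streaming sketch of such a helper, assuming a local server on the default port and reusing the `model` variable above:

```python
# Minimal non-streaming chat helper against the REST API.
import requests

def chat_once(messages):
    r = requests.post(
        "http://localhost:11434/api/chat",
        json={"model": model, "messages": messages, "stream": False},
    )
    r.raise_for_status()
    return r.json()["message"]["content"]
```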


@@ -4,10 +4,10 @@ The **chat** endpoint is one of two ways to generate text from an LLM with Ollam
## Running the Example
-1. Ensure you have the `llama3.1` model installed:
+1. Ensure you have the `llama3.2` model installed:
```bash
-ollama pull llama3.1
+ollama pull llama3.2
```
2. Install the Python Requirements.


@@ -1,6 +1,6 @@
import * as readline from "readline";
-const model = "llama3.1";
+const model = "llama3.2";
type Message = {
role: "assistant" | "user" | "system";
content: string;