Update default text-completion model to the recently released text-davinci-003
diff --git a/R/gpt3_completions.R b/R/gpt3_completions.R
index a2fc995..058d405 100644
--- a/R/gpt3_completions.R
+++ b/R/gpt3_completions.R
@@ -17,7 +17,7 @@
#'
#' @param prompt_var character vector that contains the prompts to the GPT-3 request
#' @param id_var (optional) character vector that contains the user-defined ids of the prompts. See details.
-#' @param param_model a character vector that indicates the [model](https://beta.openai.com/docs/models/gpt-3) to use; one of "text-davinci-002" (default), "text-curie-001", "text-babbage-001" or "text-ada-001"
+#' @param param_model a character vector that indicates the [model](https://beta.openai.com/docs/models/gpt-3) to use; one of "text-davinci-003" (default), "text-davinci-002", "text-davinci-001", "text-curie-001", "text-babbage-001" or "text-ada-001"
#' @param param_output_type character determining the output provided: "complete" (default), "text" or "meta"
#' @param param_suffix character (default: NULL) (from the official API documentation: _The suffix that comes after a completion of inserted text_)
#' @param param_max_tokens numeric (default: 100) indicating the maximum number of tokens that the completion request should return (from the official API documentation: _The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096)_)
@@ -67,7 +67,7 @@
gpt3_completions = function(prompt_var
, id_var
, param_output_type = 'complete'
- , param_model = 'text-davinci-002'
+ , param_model = 'text-davinci-003'
, param_suffix = NULL
, param_max_tokens = 100
, param_temperature = 0.9
diff --git a/R/gpt3_single_completion.R b/R/gpt3_single_completion.R
index a2ddc3b..65ea36c 100644
--- a/R/gpt3_single_completion.R
+++ b/R/gpt3_single_completion.R
@@ -12,7 +12,7 @@
#' - `stream`: [https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream](https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream)
#'
#' @param prompt_input character that contains the prompt to the GPT-3 request
-#' @param model a character vector that indicates the [model](https://beta.openai.com/docs/models/gpt-3) to use; one of "text-davinci-002" (default), "text-curie-001", "text-babbage-001" or "text-ada-001"
+#' @param model a character vector that indicates the [model](https://beta.openai.com/docs/models/gpt-3) to use; one of "text-davinci-003" (default), "text-davinci-002", "text-davinci-001", "text-curie-001", "text-babbage-001" or "text-ada-001"
#' @param output_type character determining the output provided: "complete" (default), "text" or "meta"
#' @param suffix character (default: NULL) (from the official API documentation: _The suffix that comes after a completion of inserted text_)
#' @param max_tokens numeric (default: 100) indicating the maximum number of tokens that the completion request should return (from the official API documentation: _The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096)_)
@@ -56,7 +56,7 @@
#' , max_tokens = 50)
#' @export
gpt3_single_completion = function(prompt_input
- , model = 'text-davinci-002'
+ , model = 'text-davinci-003'
, output_type = 'complete'
, suffix = NULL
, max_tokens = 100
diff --git a/README.md b/README.md
index 38f212e..07fdca4 100644
--- a/README.md
+++ b/README.md
@@ -171,6 +171,10 @@
You are free to make contributions to the package via pull requests. If you do so, you agree that your contributions will be licensed under the [GNU General Public License v3.0](https://github.com/ben-aaron188/rgpt3/blob/main/LICENSE.md).
+## Changelog/updates
+
+- [update] 29 Nov 2022: the just-released [davinci-003 model](https://beta.openai.com/docs/models/gpt-3) for text completions is now the default model for the text completion functions.
+
## Citation
diff --git a/man/gpt3_completions.Rd b/man/gpt3_completions.Rd
index 59eb046..4fa2416 100644
--- a/man/gpt3_completions.Rd
+++ b/man/gpt3_completions.Rd
@@ -8,7 +8,7 @@
prompt_var,
id_var,
param_output_type = "complete",
- param_model = "text-davinci-002",
+ param_model = "text-davinci-003",
param_suffix = NULL,
param_max_tokens = 100,
param_temperature = 0.9,
@@ -28,7 +28,7 @@
\item{param_output_type}{character determining the output provided: "complete" (default), "text" or "meta"}
-\item{param_model}{a character vector that indicates the \href{https://beta.openai.com/docs/models/gpt-3}{model} to use; one of "text-davinci-002" (default), "text-curie-001", "text-babbage-001" or "text-ada-001"}
+\item{param_model}{a character vector that indicates the \href{https://beta.openai.com/docs/models/gpt-3}{model} to use; one of "text-davinci-003" (default), "text-davinci-002", "text-davinci-001", "text-curie-001", "text-babbage-001" or "text-ada-001"}
\item{param_suffix}{character (default: NULL) (from the official API documentation: \emph{The suffix that comes after a completion of inserted text})}
diff --git a/man/gpt3_single_completion.Rd b/man/gpt3_single_completion.Rd
index 78eab5e..ae540ca 100644
--- a/man/gpt3_single_completion.Rd
+++ b/man/gpt3_single_completion.Rd
@@ -6,7 +6,7 @@
\usage{
gpt3_single_completion(
prompt_input,
- model = "text-davinci-002",
+ model = "text-davinci-003",
output_type = "complete",
suffix = NULL,
max_tokens = 100,
@@ -23,7 +23,7 @@
\arguments{
\item{prompt_input}{character that contains the prompt to the GPT-3 request}
-\item{model}{a character vector that indicates the \href{https://beta.openai.com/docs/models/gpt-3}{model} to use; one of "text-davinci-002" (default), "text-curie-001", "text-babbage-001" or "text-ada-001"}
+\item{model}{a character vector that indicates the \href{https://beta.openai.com/docs/models/gpt-3}{model} to use; one of "text-davinci-003" (default), "text-davinci-002", "text-davinci-001", "text-curie-001", "text-babbage-001" or "text-ada-001"}
\item{output_type}{character determining the output provided: "complete" (default), "text" or "meta"}