Update default model to the newer text-davinci-003 and document the expanded list of supported models
diff --git a/R/gpt3_completions.R b/R/gpt3_completions.R
index a2fc995..058d405 100644
--- a/R/gpt3_completions.R
+++ b/R/gpt3_completions.R
@@ -17,7 +17,7 @@
 #'
 #' @param prompt_var character vector that contains the prompts to the GPT-3 request
 #' @param id_var (optional) character vector that contains the user-defined ids of the prompts. See details.
-#' @param param_model a character vector that indicates the [model](https://beta.openai.com/docs/models/gpt-3) to use; one of "text-davinci-002" (default), "text-curie-001", "text-babbage-001" or "text-ada-001"
+#' @param param_model a character vector that indicates the [model](https://beta.openai.com/docs/models/gpt-3) to use; one of "text-davinci-003" (default), "text-davinci-002", "text-davinci-001", "text-curie-001", "text-babbage-001" or "text-ada-001"
 #' @param param_output_type character determining the output provided: "complete" (default), "text" or "meta"
 #' @param param_suffix character (default: NULL) (from the official API documentation: _The suffix that comes after a completion of inserted text_)
 #' @param param_max_tokens numeric (default: 100) indicating the maximum number of tokens that the completion request should return (from the official API documentation: _The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096)_)
@@ -67,7 +67,7 @@
 gpt3_completions = function(prompt_var
                               , id_var
                               , param_output_type = 'complete'
-                              , param_model = 'text-davinci-002'
+                              , param_model = 'text-davinci-003'
                               , param_suffix = NULL
                               , param_max_tokens = 100
                               , param_temperature = 0.9
diff --git a/R/gpt3_single_completion.R b/R/gpt3_single_completion.R
index a2ddc3b..65ea36c 100644
--- a/R/gpt3_single_completion.R
+++ b/R/gpt3_single_completion.R
@@ -12,7 +12,7 @@
 #'   - `stream`: [https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream](https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream)
 #'
 #' @param prompt_input character that contains the prompt to the GPT-3 request
-#' @param model a character vector that indicates the [model](https://beta.openai.com/docs/models/gpt-3) to use; one of "text-davinci-002" (default), "text-curie-001", "text-babbage-001" or "text-ada-001"
+#' @param model a character vector that indicates the [model](https://beta.openai.com/docs/models/gpt-3) to use; one of "text-davinci-003" (default), "text-davinci-002", "text-davinci-001", "text-curie-001", "text-babbage-001" or "text-ada-001"
 #' @param output_type character determining the output provided: "complete" (default), "text" or "meta"
 #' @param suffix character (default: NULL) (from the official API documentation: _The suffix that comes after a completion of inserted text_)
 #' @param max_tokens numeric (default: 100) indicating the maximum number of tokens that the completion request should return (from the official API documentation: _The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096)_)
@@ -56,7 +56,7 @@
 #'     , max_tokens = 50)
 #' @export
 gpt3_single_completion = function(prompt_input
-                              , model = 'text-davinci-002'
+                              , model = 'text-davinci-003'
                               , output_type = 'complete'
                               , suffix = NULL
                               , max_tokens = 100