Rename request functions to completion functions
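
The three user-facing functions now follow the completions terminology of the underlying API endpoint. Existing scripts need the mechanical swap sketched below; the example call is taken from the package documentation and only the function name changes:

# old name               -> new name
# gpt3_requests()        -> gpt3_completions()
# gpt3_single_request()  -> gpt3_single_completion()
# gpt3_test_request()    -> gpt3_test_completion()

# e.g. a call that previously read gpt3_single_request(prompt_input = ...) becomes:
gpt3_single_completion(prompt_input = 'How old are you?')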
diff --git a/R/gpt3_requests.R b/R/gpt3_completions.R
similarity index 92%
rename from R/gpt3_requests.R
rename to R/gpt3_completions.R
index 1342dc2..a2fc995 100644
--- a/R/gpt3_requests.R
+++ b/R/gpt3_completions.R
@@ -1,12 +1,12 @@
 #' Makes bunch completion requests to the GPT-3 API
 #'
 #' @description
-#' `gpt3_requests()` is the package's main function for rquests and takes as input a vector of prompts and processes each prompt as per the defined parameters. It extends the `gpt3_single_request()` function to allow for bunch processing of requests to the Open AI GPT-3 API.
+#' `gpt3_completions()` is the package's main function for requests and takes as input a vector of prompts and processes each prompt as per the defined parameters. It extends the `gpt3_single_completion()` function to allow for bunch processing of requests to the Open AI GPT-3 API.
 #' @details
 #' The easiest (and intended) use case for this function is to create a data.frame or data.table with variables that contain the prompts to be requested from GPT-3 and a prompt id (see examples below).
 #' For a general guide on the completion requests, see [https://beta.openai.com/docs/guides/completion](https://beta.openai.com/docs/guides/completion). This function provides you with an R wrapper to send requests with the full range of request parameters as detailed on [https://beta.openai.com/docs/api-reference/completions](https://beta.openai.com/docs/api-reference/completions) and reproduced below.
 #'
-#' For the `best_of` parameter: The `gpt3_single_request()` (which is used here in a vectorised manner) handles the issue that best_of must be greater than n by setting `if(best_of <= n){ best_of = n}`.
+#' For the `best_of` parameter: The `gpt3_single_completion()` (which is used here in a vectorised manner) handles the issue that best_of must be greater than n by setting `if(best_of <= n){ best_of = n}`.
 #'
 #' If `id_var` is not provided, the function will use `prompt_1` ... `prompt_n` as id variable.
 #'
@@ -41,30 +41,30 @@
 #' # Once authenticated:
 #' # Assuming you have a data.table with 3 different prompts:
 #' dt_prompts = data.table::data.table('prompts' = c('What is the meaning if life?', 'Write a tweet about London:', 'Write a research proposal for using AI to fight fake news:'), 'prompt_id' = c(LETTERS[1:3]))
-#'gpt3_requests(prompt_var = dt_prompts$prompts
+#'gpt3_completions(prompt_var = dt_prompts$prompts
 #'    , id_var = dt_prompts$prompt_id)
 #'
 #' ## With more controls
-#'gpt3_requests(prompt_var = dt_prompts$prompts
+#'gpt3_completions(prompt_var = dt_prompts$prompts
 #'    , id_var = dt_prompts$prompt_id
 #'    , param_max_tokens = 50
 #'    , param_temperature = 0.5
 #'    , param_n = 5)
 #'
 #' ## Reproducible example (deterministic approach)
-#'gpt3_requests(prompt_var = dt_prompts$prompts
+#'gpt3_completions(prompt_var = dt_prompts$prompts
 #'    , id_var = dt_prompts$prompt_id
 #'    , param_max_tokens = 50
 #'    , param_temperature = 0.0)
 #'
 #' ## Changing the GPT-3 model
-#'gpt3_requests(prompt_var = dt_prompts$prompts
+#'gpt3_completions(prompt_var = dt_prompts$prompts
 #'    , id_var = dt_prompts$prompt_id
 #'    , param_model = 'text-babbage-001'
 #'    , param_max_tokens = 50
 #'    , param_temperature = 0.4)
 #' @export
-gpt3_requests = function(prompt_var
+gpt3_completions = function(prompt_var
                               , id_var
                               , param_output_type = 'complete'
                               , param_model = 'text-davinci-002'
@@ -93,7 +93,7 @@
 
     print(paste0('Request: ', i, '/', data_length))
 
-    row_outcome = gpt3_single_request(prompt_input = prompt_var[i]
+    row_outcome = gpt3_single_completion(prompt_input = prompt_var[i]
                                       , model = param_model
                                       , output_type = 'complete'
                                       , suffix = param_suffix
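A small sketch of the intended bunch workflow from the details section above, using the renamed function (the result is assigned but not unpacked here, since the structure of the return value is not part of this excerpt):

library(data.table)

# data.table holding the prompts plus an id column, as recommended in the docs
dt_prompts = data.table('prompts' = c('What is the meaning of life?'
                                      , 'Write a tweet about London:')
                        , 'prompt_id' = c('A', 'B'))

# vectorised request over all prompts; if id_var were omitted, the ids would
# default to prompt_1 ... prompt_n (see the details section above)
res = gpt3_completions(prompt_var = dt_prompts$prompts
                       , id_var = dt_prompts$prompt_id
                       , param_max_tokens = 50
                       , param_temperature = 0.0)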
diff --git a/R/gpt3_single_request.R b/R/gpt3_single_completion.R
similarity index 94%
rename from R/gpt3_single_request.R
rename to R/gpt3_single_completion.R
index 4858140..a2ddc3b 100644
--- a/R/gpt3_single_request.R
+++ b/R/gpt3_single_completion.R
@@ -1,7 +1,7 @@
 #' Makes a single completion request to the GPT-3 API
 #'
 #' @description
-#' `gpt3_single_request()` sends a single [completion request](https://beta.openai.com/docs/api-reference/completions) to the Open AI GPT-3 API.
+#' `gpt3_single_completion()` sends a single [completion request](https://beta.openai.com/docs/api-reference/completions) to the Open AI GPT-3 API.
 #' @details For a general guide on the completion requests, see [https://beta.openai.com/docs/guides/completion](https://beta.openai.com/docs/guides/completion). This function provides you with an R wrapper to send requests with the full range of request parameters as detailed on [https://beta.openai.com/docs/api-reference/completions](https://beta.openai.com/docs/api-reference/completions) and reproduced below.
 #'
 #' For the `best_of` parameter: When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n. Note that this is handled by the wrapper automatically   if(best_of <= n){ best_of = n}.
@@ -36,26 +36,26 @@
 #' # Once authenticated:
 #'
 #' ## Simple request with defaults:
-#' gpt3_single_request(prompt_input = 'How old are you?')
+#' gpt3_single_completion(prompt_input = 'How old are you?')
 #'
 #' ## Instruct GPT-3 to write ten research ideas of max. 150 tokens with some controls:
-#'gpt3_single_request(prompt_input = 'Write a research idea about using text data to understand human behaviour:'
+#'gpt3_single_completion(prompt_input = 'Write a research idea about using text data to understand human behaviour:'
 #'    , temperature = 0.8
 #'    , n = 10
 #'    , max_tokens = 150)
 #'
 #' ## For fully reproducible results, we need `temperature = 0`, e.g.:
-#' gpt3_single_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
+#' gpt3_single_completion(prompt_input = 'Finish this sentence:\n There is no easier way to learn R than'
 #'     , temperature = 0.0
 #'     , max_tokens = 50)
 #'
 #' ## The same example with a different GPT-3 model:
-#' gpt3_single_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
+#' gpt3_single_completion(prompt_input = 'Finish this sentence:\n There is no easier way to learn R than'
 #'     , model = 'text-babbage-001'
 #'     , temperature = 0.0
 #'     , max_tokens = 50)
 #' @export
-gpt3_single_request = function(prompt_input
+gpt3_single_completion = function(prompt_input
                               , model = 'text-davinci-002'
                               , output_type = 'complete'
                               , suffix = NULL
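To illustrate the best_of handling quoted in the details above: the wrapper silently raises best_of to n whenever best_of <= n before the request is sent. A minimal sketch with the renamed function, assuming best_of is exposed as a top-level argument (the details section implies this, but the full signature is not shown in this excerpt):

# ask for five candidate completions; had best_of been passed as a smaller
# value, the wrapper would bump it up to n via if(best_of <= n){ best_of = n }
gpt3_single_completion(prompt_input = 'Write a research idea about using text data to understand human behaviour:'
    , temperature = 0.8
    , n = 5
    , best_of = 5
    , max_tokens = 150)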
diff --git a/R/test_request.R b/R/test_completion.R
similarity index 63%
rename from R/test_request.R
rename to R/test_completion.R
index 515abc2..bfd8836 100644
--- a/R/test_request.R
+++ b/R/test_completion.R
@@ -1,18 +1,18 @@
 #' Make a test request to the GPT-3 API
 #'
 #' @description
-#' `gpt3_test_request()` sends a basic [completion request](https://beta.openai.com/docs/api-reference/completions) to the Open AI GPT-3 API.
+#' `gpt3_test_completion()` sends a basic [completion request](https://beta.openai.com/docs/api-reference/completions) to the Open AI GPT-3 API.
 #' @param verbose (boolean) if TRUE prints the actual prompt and GPT-3 completion of the test request (default: TRUE).
 #' @return A message of success or failure of the connection.
 #' @examples
-#' gpt3_test_request()
+#' gpt3_test_completion()
 #' @export
-gpt3_test_request = function(verbose=T){
+gpt3_test_completion = function(verbose=T){
 
   check_apikey_form()
 
-  test_prompt = 'Write a story about R Studio:'
-  test_output = gpt3_single_request(prompt_ = test_prompt
+  test_prompt = 'Write a story about R Studio: '
+  test_output = gpt3_single_completion(prompt_input = test_prompt
                                   , max_tokens = 100)
   print(paste0('.. test successful ..'))
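After this rename, a quick smoke test of the connection could look like the sketch below. gpt3_authenticate() is assumed here as the step that registers the API key (it is not part of this diff); substitute whatever routine your setup uses:

# assumed authentication helper (not shown in this diff) -- point it at the
# file that holds your OpenAI API key
gpt3_authenticate('path/to/your/access_key.txt')

# renamed test function; verbose = TRUE also prints the test prompt and the
# returned completion
gpt3_test_completion(verbose = TRUE)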