Update function names
diff --git a/NAMESPACE b/NAMESPACE
index 9a80e08..7057df6 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,9 +1,10 @@
 # Generated by roxygen2: do not edit by hand
 
 export(gpt3_authenticate)
-export(gpt3_bunch_request)
-export(gpt3_make_embedding)
-export(gpt3_make_request)
+export(gpt3_embeddings)
+export(gpt3_requests)
+export(gpt3_single_embedding)
+export(gpt3_single_request)
 export(gpt3_test_request)
 export(to_numeric)
 export(url.completions)
diff --git a/R/bunch_embedding.R b/R/gpt3_embeddings.R
similarity index 82%
rename from R/bunch_embedding.R
rename to R/gpt3_embeddings.R
index d22a3ac..513a5b8 100644
--- a/R/bunch_embedding.R
+++ b/R/gpt3_embeddings.R
@@ -1,7 +1,7 @@
 #' Retrieves text embeddings for character input from a vector from the GPT-3 API
 #'
 #' @description
-#' `gpt3_bunch_embedding()` extends the single embeddings function `gpt3_make_embedding()` to allow for the processing of a whole vector
+#' `gpt3_embeddings()` extends the single embeddings function `gpt3_single_embedding()` to allow for the processing of a whole vector
 #' @details The returned data.table contains the column `id` which indicates the text id (or its generic alternative if not specified) and the columns `dim_1` ... `dim_{max}`, where `max` is the length of the text embeddings vector that the four different models return. For the default "Ada" model, these are 1024 dimensions (i.e., `dim_1`... `dim_1024`).
 #'
 #' The function supports the text similarity embeddings for the four GPT-3 models as specified in the parameter list. The main difference between the four models is the sophistication of the embedding representation as indicated by the vector embedding size.
@@ -21,19 +21,19 @@
 #' # First authenticate with your API key via `gpt3_authenticate('pathtokey')`
 #'
 #' # Use example data:
-#' ## The data below were generated with the `gpt3_make_request()` function as follows:
+#' ## The data below were generated with the `gpt3_single_request()` function as follows:
 #' ##### DO NOT RUN #####
-#' # travel_blog_data = gpt3_make_request(prompt_input = "Write a travel blog about a dog's journey through the UK:", temperature = 0.8, n = 10, max_tokens = 200)[[1]]
+#' # travel_blog_data = gpt3_single_request(prompt_input = "Write a travel blog about a dog's journey through the UK:", temperature = 0.8, n = 10, max_tokens = 200)[[1]]
 #' ##### END DO NOT RUN #####
 #'
 #' # You can load these data with:
 #' data("travel_blog_data") # the dataset contains 10 completions for the above request
 #'
 #' ## Obtain text embeddings for the completion texts:
-#' emb_travelblogs = gpt3_bunch_embedding(input_var = travel_blog_data$gpt3)
+#' emb_travelblogs = gpt3_embeddings(input_var = travel_blog_data$gpt3)
 #' dim(emb_travelblogs)
 #' @export
-gpt3_bunch_embedding = function(input_var
+gpt3_embeddings = function(input_var
                                 , id_var
                                 , param_model = 'text-similarity-ada-001'){
 
@@ -50,7 +50,7 @@
 
     print(paste0('Embedding: ', i, '/', data_length))
 
-    row_outcome = gpt3_make_embedding(model = param_model
+    row_outcome = gpt3_single_embedding(model = param_model
                                       , input = input_var[i])
 
     empty_df = data.frame(t(row_outcome))
diff --git a/R/bunch_request.R b/R/gpt3_requests.R
similarity index 92%
rename from R/bunch_request.R
rename to R/gpt3_requests.R
index dc9a7f2..1342dc2 100644
--- a/R/bunch_request.R
+++ b/R/gpt3_requests.R
@@ -1,12 +1,12 @@
 #' Makes bunch completion requests to the GPT-3 API
 #'
 #' @description
-#' `gpt3_bunch_request()` is the package's main function for rquests and takes as input a vector of prompts and processes each prompt as per the defined parameters. It extends the `gpt3_make_request()` function to allow for bunch processing of requests to the Open AI GPT-3 API.
+#' `gpt3_requests()` is the package's main function for requests and takes as input a vector of prompts and processes each prompt as per the defined parameters. It extends the `gpt3_single_request()` function to allow for bunch processing of requests to the Open AI GPT-3 API.
 #' @details
 #' The easiest (and intended) use case for this function is to create a data.frame or data.table with variables that contain the prompts to be requested from GPT-3 and a prompt id (see examples below).
 #' For a general guide on the completion requests, see [https://beta.openai.com/docs/guides/completion](https://beta.openai.com/docs/guides/completion). This function provides you with an R wrapper to send requests with the full range of request parameters as detailed on [https://beta.openai.com/docs/api-reference/completions](https://beta.openai.com/docs/api-reference/completions) and reproduced below.
 #'
-#' For the `best_of` parameter: The `gpt3_make_request()` (which is used here in a vectorised manner) handles the issue that best_of must be greater than n by setting `if(best_of <= n){ best_of = n}`.
+#' For the `best_of` parameter: The `gpt3_single_request()` (which is used here in a vectorised manner) handles the issue that best_of must be greater than n by setting `if(best_of <= n){ best_of = n}`.
 #'
 #' If `id_var` is not provided, the function will use `prompt_1` ... `prompt_n` as id variable.
 #'
@@ -41,30 +41,30 @@
 #' # Once authenticated:
 #' # Assuming you have a data.table with 3 different prompts:
 #' dt_prompts = data.table::data.table('prompts' = c('What is the meaning if life?', 'Write a tweet about London:', 'Write a research proposal for using AI to fight fake news:'), 'prompt_id' = c(LETTERS[1:3]))
-#'gpt3_bunch_request(prompt_var = dt_prompts$prompts
+#'gpt3_requests(prompt_var = dt_prompts$prompts
 #'    , id_var = dt_prompts$prompt_id)
 #'
 #' ## With more controls
-#'gpt3_bunch_request(prompt_var = dt_prompts$prompts
+#'gpt3_requests(prompt_var = dt_prompts$prompts
 #'    , id_var = dt_prompts$prompt_id
 #'    , param_max_tokens = 50
 #'    , param_temperature = 0.5
 #'    , param_n = 5)
 #'
 #' ## Reproducible example (deterministic approach)
-#'gpt3_bunch_request(prompt_var = dt_prompts$prompts
+#'gpt3_requests(prompt_var = dt_prompts$prompts
 #'    , id_var = dt_prompts$prompt_id
 #'    , param_max_tokens = 50
 #'    , param_temperature = 0.0)
 #'
 #' ## Changing the GPT-3 model
-#'gpt3_bunch_request(prompt_var = dt_prompts$prompts
+#'gpt3_requests(prompt_var = dt_prompts$prompts
 #'    , id_var = dt_prompts$prompt_id
 #'    , param_model = 'text-babbage-001'
 #'    , param_max_tokens = 50
 #'    , param_temperature = 0.4)
 #' @export
-gpt3_bunch_request = function(prompt_var
+gpt3_requests = function(prompt_var
                               , id_var
                               , param_output_type = 'complete'
                               , param_model = 'text-davinci-002'
@@ -93,7 +93,7 @@
 
     print(paste0('Request: ', i, '/', data_length))
 
-    row_outcome = gpt3_make_request(prompt_input = prompt_var[i]
+    row_outcome = gpt3_single_request(prompt_input = prompt_var[i]
                                       , model = param_model
                                       , output_type = 'complete'
                                       , suffix = param_suffix
diff --git a/R/make_embedding.R b/R/gpt3_single_embedding.R
similarity index 88%
rename from R/make_embedding.R
rename to R/gpt3_single_embedding.R
index a916ac6..8ec6d23 100644
--- a/R/make_embedding.R
+++ b/R/gpt3_single_embedding.R
@@ -1,7 +1,7 @@
 #' Obtains text embeddings for a single character (string) from the GPT-3 API
 #'
 #' @description
-#' `gpt3_make_embedding()` sends a single [embedding request](https://beta.openai.com/docs/guides/embeddings) to the Open AI GPT-3 API.
+#' `gpt3_single_embedding()` sends a single [embedding request](https://beta.openai.com/docs/guides/embeddings) to the Open AI GPT-3 API.
 #' @details The function supports the text similarity embeddings for the four GPT-3 models as specified in the parameter list. The main difference between the four models is the sophistication of the embedding representation as indicated by the vector embedding size.
 #'   - Ada (1024 dimensions)
 #'   - Babbage (2048 dimensions)
@@ -21,13 +21,13 @@
 #'
 #' ## Simple request with defaults:
 #' sample_string = "London is one of the most liveable cities in the world. The city is always full of energy and people. It's always a great place to explore and have fun."
-#' gpt3_make_embedding(input = sample_string)
+#' gpt3_single_embedding(input = sample_string)
 #'
 #' ## Change the model:
-#' #' gpt3_make_embedding(input = sample_string
+#' gpt3_single_embedding(input = sample_string
 #'     , model = 'text-similarity-curie-001')
 #' @export
-gpt3_make_embedding = function(input
+gpt3_single_embedding = function(input
                                , model = 'text-similarity-ada-001'
                                ){
 
diff --git a/R/make_request.R b/R/gpt3_single_request.R
similarity index 94%
rename from R/make_request.R
rename to R/gpt3_single_request.R
index 7fa653a..4858140 100644
--- a/R/make_request.R
+++ b/R/gpt3_single_request.R
@@ -1,7 +1,7 @@
 #' Makes a single completion request to the GPT-3 API
 #'
 #' @description
-#' `gpt3_make_request()` sends a single [completion request](https://beta.openai.com/docs/api-reference/completions) to the Open AI GPT-3 API.
+#' `gpt3_single_request()` sends a single [completion request](https://beta.openai.com/docs/api-reference/completions) to the Open AI GPT-3 API.
 #' @details For a general guide on the completion requests, see [https://beta.openai.com/docs/guides/completion](https://beta.openai.com/docs/guides/completion). This function provides you with an R wrapper to send requests with the full range of request parameters as detailed on [https://beta.openai.com/docs/api-reference/completions](https://beta.openai.com/docs/api-reference/completions) and reproduced below.
 #'
 #' For the `best_of` parameter: When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n. Note that this is handled by the wrapper automatically   if(best_of <= n){ best_of = n}.
@@ -36,26 +36,26 @@
 #' # Once authenticated:
 #'
 #' ## Simple request with defaults:
-#' gpt3_make_request(prompt_input = 'How old are you?')
+#' gpt3_single_request(prompt_input = 'How old are you?')
 #'
 #' ## Instruct GPT-3 to write ten research ideas of max. 150 tokens with some controls:
-#'gpt3_make_request(prompt_input = 'Write a research idea about using text data to understand human behaviour:'
+#'gpt3_single_request(prompt_input = 'Write a research idea about using text data to understand human behaviour:'
 #'    , temperature = 0.8
 #'    , n = 10
 #'    , max_tokens = 150)
 #'
 #' ## For fully reproducible results, we need `temperature = 0`, e.g.:
-#' gpt3_make_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
+#' gpt3_single_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
 #'     , temperature = 0.0
 #'     , max_tokens = 50)
 #'
 #' ## The same example with a different GPT-3 model:
-#' gpt3_make_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
+#' gpt3_single_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
 #'     , model = 'text-babbage-001'
 #'     , temperature = 0.0
 #'     , max_tokens = 50)
 #' @export
-gpt3_make_request = function(prompt_input
+gpt3_single_request = function(prompt_input
                               , model = 'text-davinci-002'
                               , output_type = 'complete'
                               , suffix = NULL
diff --git a/R/test_request.R b/R/test_request.R
index 7b8c7e4..673f1c0 100644
--- a/R/test_request.R
+++ b/R/test_request.R
@@ -12,7 +12,7 @@
   check_apikey_form()
 
   test_prompt = 'Write a story about R Studio:'
-  test_outout = gpt3_make_request(prompt_ = test_prompt
+  test_outout = gpt3_single_request(prompt_input = test_prompt
                                   , max_tokens = 100)
   print(paste0('.. test successful ..'))
 
diff --git a/man/gpt3_bunch_embedding.Rd b/man/gpt3_embeddings.Rd
similarity index 75%
rename from man/gpt3_bunch_embedding.Rd
rename to man/gpt3_embeddings.Rd
index 9a33169..6c2ac9a 100644
--- a/man/gpt3_bunch_embedding.Rd
+++ b/man/gpt3_embeddings.Rd
@@ -1,14 +1,10 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/bunch_embedding.R
-\name{gpt3_bunch_embedding}
-\alias{gpt3_bunch_embedding}
+% Please edit documentation in R/gpt3_embeddings.R
+\name{gpt3_embeddings}
+\alias{gpt3_embeddings}
 \title{Retrieves text embeddings for character input from a vector from the GPT-3 API}
 \usage{
-gpt3_bunch_embedding(
-  input_var,
-  id_var,
-  param_model = "text-similarity-ada-001"
-)
+gpt3_embeddings(input_var, id_var, param_model = "text-similarity-ada-001")
 }
 \arguments{
 \item{input_var}{character vector that contains the texts for which you want to obtain text embeddings from the GPT-3 model
@@ -20,7 +16,7 @@
 A data.table with the embeddings as separate columns; one row represents one input text. See details.
 }
 \description{
-\code{gpt3_bunch_embedding()} extends the single embeddings function \code{gpt3_make_embedding()} to allow for the processing of a whole vector
+\code{gpt3_embeddings()} extends the single embeddings function \code{gpt3_single_embedding()} to allow for the processing of a whole vector
 }
 \details{
 The returned data.table contains the column \code{id} which indicates the text id (or its generic alternative if not specified) and the columns \code{dim_1} ... \verb{dim_\{max\}}, where \code{max} is the length of the text embeddings vector that the four different models return. For the default "Ada" model, these are 1024 dimensions (i.e., \code{dim_1}... \code{dim_1024}).
@@ -41,16 +37,15 @@
 # First authenticate with your API key via `gpt3_authenticate('pathtokey')`
 
 # Use example data:
-## The data below were generated with the `gpt3_make_request()` function as follows:
+## The data below were generated with the `gpt3_single_request()` function as follows:
 ##### DO NOT RUN #####
-# travel_blog_data = gpt3_make_request(prompt_input = "Write a travel blog about a dog's journey through the UK:", temperature = 0.8, n = 10, max_tokens = 200)[[1]]
+# travel_blog_data = gpt3_single_request(prompt_input = "Write a travel blog about a dog's journey through the UK:", temperature = 0.8, n = 10, max_tokens = 200)[[1]]
 ##### END DO NOT RUN #####
 
 # You can load these data with:
 data("travel_blog_data") # the dataset contains 10 completions for the above request
 
-
 ## Obtain text embeddings for the completion texts:
-gpt3_bunch_embedding(input = sample_string
-    , model = 'text-similarity-curie-001')
+emb_travelblogs = gpt3_embeddings(input_var = travel_blog_data$gpt3)
+dim(emb_travelblogs)
 }
diff --git a/man/gpt3_bunch_request.Rd b/man/gpt3_requests.Rd
similarity index 89%
rename from man/gpt3_bunch_request.Rd
rename to man/gpt3_requests.Rd
index 347dfc4..b85d374 100644
--- a/man/gpt3_bunch_request.Rd
+++ b/man/gpt3_requests.Rd
@@ -1,10 +1,10 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/bunch_request.R
-\name{gpt3_bunch_request}
-\alias{gpt3_bunch_request}
+% Please edit documentation in R/gpt3_requests.R
+\name{gpt3_requests}
+\alias{gpt3_requests}
 \title{Makes bunch completion requests to the GPT-3 API}
 \usage{
-gpt3_bunch_request(
+gpt3_requests(
   prompt_var,
   id_var,
   param_output_type = "complete",
@@ -58,13 +58,13 @@
 If \code{output_type} is "meta", only the data table in slot [\link{2}] is returned.
 }
 \description{
-\code{gpt3_bunch_request()} is the package's main function for rquests and takes as input a vector of prompts and processes each prompt as per the defined parameters. It extends the \code{gpt3_make_request()} function to allow for bunch processing of requests to the Open AI GPT-3 API.
+\code{gpt3_requests()} is the package's main function for requests and takes as input a vector of prompts and processes each prompt as per the defined parameters. It extends the \code{gpt3_single_request()} function to allow for bunch processing of requests to the Open AI GPT-3 API.
 }
 \details{
 The easiest (and intended) use case for this function is to create a data.frame or data.table with variables that contain the prompts to be requested from GPT-3 and a prompt id (see examples below).
 For a general guide on the completion requests, see \url{https://beta.openai.com/docs/guides/completion}. This function provides you with an R wrapper to send requests with the full range of request parameters as detailed on \url{https://beta.openai.com/docs/api-reference/completions} and reproduced below.
 
-For the \code{best_of} parameter: The \code{gpt3_make_request()} (which is used here in a vectorised manner) handles the issue that best_of must be greater than n by setting \code{if(best_of <= n){ best_of = n}}.
+For the \code{best_of} parameter: The \code{gpt3_single_request()} (which is used here in a vectorised manner) handles the issue that best_of must be greater than n by setting \code{if(best_of <= n){ best_of = n}}.
 
 If \code{id_var} is not provided, the function will use \code{prompt_1} ... \code{prompt_n} as id variable.
 
@@ -81,24 +81,24 @@
 # Once authenticated:
 # Assuming you have a data.table with 3 different prompts:
 dt_prompts = data.table::data.table('prompts' = c('What is the meaning if life?', 'Write a tweet about London:', 'Write a research proposal for using AI to fight fake news:'), 'prompt_id' = c(LETTERS[1:3]))
-gpt3_bunch_request(prompt_var = dt_prompts$prompts
+gpt3_requests(prompt_var = dt_prompts$prompts
    , id_var = dt_prompts$prompt_id)
 
 ## With more controls
-gpt3_bunch_request(prompt_var = dt_prompts$prompts
+gpt3_requests(prompt_var = dt_prompts$prompts
    , id_var = dt_prompts$prompt_id
    , param_max_tokens = 50
    , param_temperature = 0.5
    , param_n = 5)
 
 ## Reproducible example (deterministic approach)
-gpt3_bunch_request(prompt_var = dt_prompts$prompts
+gpt3_requests(prompt_var = dt_prompts$prompts
    , id_var = dt_prompts$prompt_id
    , param_max_tokens = 50
    , param_temperature = 0.0)
 
 ## Changing the GPT-3 model
-gpt3_bunch_request(prompt_var = dt_prompts$prompts
+gpt3_requests(prompt_var = dt_prompts$prompts
    , id_var = dt_prompts$prompt_id
    , param_model = 'text-babbage-001'
    , param_max_tokens = 50
diff --git a/man/gpt3_make_request.Rd b/man/gpt3_single_request.Rd
similarity index 91%
rename from man/gpt3_make_request.Rd
rename to man/gpt3_single_request.Rd
index 504c112..ae3f39a 100644
--- a/man/gpt3_make_request.Rd
+++ b/man/gpt3_single_request.Rd
@@ -1,10 +1,10 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/make_request.R
-\name{gpt3_make_request}
-\alias{gpt3_make_request}
+% Please edit documentation in R/gpt3_single_request.R
+\name{gpt3_single_request}
+\alias{gpt3_single_request}
 \title{Makes a single completion request to the GPT-3 API}
 \usage{
-gpt3_make_request(
+gpt3_single_request(
   prompt_input,
   model = "text-davinci-002",
   output_type = "complete",
@@ -55,7 +55,7 @@
 If \code{output_type} is "meta", only the data table in slot [\link{2}] is returned.
 }
 \description{
-\code{gpt3_make_request()} sends a single \href{https://beta.openai.com/docs/api-reference/completions}{completion request} to the Open AI GPT-3 API.
+\code{gpt3_single_request()} sends a single \href{https://beta.openai.com/docs/api-reference/completions}{completion request} to the Open AI GPT-3 API.
 }
 \details{
 For a general guide on the completion requests, see \url{https://beta.openai.com/docs/guides/completion}. This function provides you with an R wrapper to send requests with the full range of request parameters as detailed on \url{https://beta.openai.com/docs/api-reference/completions} and reproduced below.
@@ -75,21 +75,21 @@
 # Once authenticated:
 
 ## Simple request with defaults:
-gpt3_make_request(prompt_input = 'How old are you?')
+gpt3_single_request(prompt_input = 'How old are you?')
 
 ## Instruct GPT-3 to write ten research ideas of max. 150 tokens with some controls:
-gpt3_make_request(prompt_input = 'Write a research idea about using text data to understand human behaviour:'
+gpt3_single_request(prompt_input = 'Write a research idea about using text data to understand human behaviour:'
    , temperature = 0.8
    , n = 10
    , max_tokens = 150)
 
 ## For fully reproducible results, we need `temperature = 0`, e.g.:
-gpt3_make_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
+gpt3_single_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
     , temperature = 0.0
     , max_tokens = 50)
 
 ## The same example with a different GPT-3 model:
-gpt3_make_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
+gpt3_single_request(prompt_input = 'Finish this sentence:/n There is no easier way to learn R than'
     , model = 'text-babbage-001'
     , temperature = 0.0
     , max_tokens = 50)
diff --git a/man/gpt3_make_embedding.Rd b/man/gpt3_single_embedding.Rd
similarity index 79%
rename from man/gpt3_make_embedding.Rd
rename to man/gpt3_single_embedding.Rd
index b339ab5..4742405 100644
--- a/man/gpt3_make_embedding.Rd
+++ b/man/gpt3_single_embedding.Rd
@@ -1,10 +1,10 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/make_embedding.R
-\name{gpt3_make_embedding}
-\alias{gpt3_make_embedding}
+% Please edit documentation in R/gpt3_single_embedding.R
+\name{gpt3_single_embedding}
+\alias{gpt3_single_embedding}
 \title{Obtains text embeddings for a single character (string) from the GPT-3 API}
 \usage{
-gpt3_make_embedding(input, model = "text-similarity-ada-001")
+gpt3_single_embedding(input, model = "text-similarity-ada-001")
 }
 \arguments{
 \item{input}{character that contains the text for which you want to obtain text embeddings from the GPT-3 model}
@@ -15,7 +15,7 @@
 A numeric vector (= the embedding vector)
 }
 \description{
-\code{gpt3_make_embedding()} sends a single \href{https://beta.openai.com/docs/guides/embeddings}{embedding request} to the Open AI GPT-3 API.
+\code{gpt3_single_embedding()} sends a single \href{https://beta.openai.com/docs/guides/embeddings}{embedding request} to the Open AI GPT-3 API.
 }
 \details{
 The function supports the text similarity embeddings for the four GPT-3 models as specified in the parameter list. The main difference between the four models is the sophistication of the embedding representation as indicated by the vector embedding size.
@@ -37,9 +37,9 @@
 
 ## Simple request with defaults:
 sample_string = "London is one of the most liveable cities in the world. The city is always full of energy and people. It's always a great place to explore and have fun."
-gpt3_make_embedding(input = sample_string)
+gpt3_single_embedding(input = sample_string)
 
 ## Change the model:
-#' gpt3_make_embedding(input = sample_string
+gpt3_single_embedding(input = sample_string
     , model = 'text-similarity-curie-001')
 }