@article{korngiebel2021considering,
  title     = {Considering the Possibilities and Pitfalls of {Generative Pre-trained Transformer 3} ({GPT}-3) in Healthcare Delivery},
  author    = {Korngiebel, Diane M and Mooney, Sean D},
  journal   = {npj Digital Medicine},
  volume    = {4},
  number    = {1},
  pages     = {1--3},
  year      = {2021},
  publisher = {Nature Publishing Group}
}
@article{binz2022using,
  title         = {Using Cognitive Psychology to Understand {GPT}-3},
  author        = {Binz, Marcel and Schulz, Eric},
  journal       = {arXiv preprint arXiv:2206.14576},
  eprint        = {2206.14576},
  archiveprefix = {arXiv},
  year          = {2022}
}
@article{stevenson2022putting,
  title         = {Putting {GPT}-3's Creativity to the ({Alternative Uses}) Test},
  author        = {Stevenson, Claire and Smal, Iris and Baas, Matthijs and Grasman, Raoul and van der Maas, Han},
  journal       = {arXiv preprint arXiv:2206.08932},
  eprint        = {2206.08932},
  archiveprefix = {arXiv},
  year          = {2022}
}
@article{garg2018word,
  title     = {Word Embeddings Quantify 100 Years of Gender and Ethnic Stereotypes},
  author    = {Garg, Nikhil and Schiebinger, Londa and Jurafsky, Dan and Zou, James},
  journal   = {Proceedings of the National Academy of Sciences},
  volume    = {115},
  number    = {16},
  pages     = {E3635--E3644},
  year      = {2018},
  doi       = {10.1073/pnas.1720347115},
  publisher = {National Academy of Sciences}
}
@inproceedings{bender2021dangers,
  title     = {On the Dangers of {Stochastic Parrots}: Can Language Models Be Too Big?},
  author    = {Bender, Emily M and Gebru, Timnit and McMillan-Major, Angelina and Shmitchell, Shmargaret},
  booktitle = {Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency},
  pages     = {610--623},
  year      = {2021},
  publisher = {ACM},
  doi       = {10.1145/3442188.3445922}
}
@article{dale2021gpt,
  title     = {{GPT}-3: What's it good for?},
  author    = {Dale, Robert},
  journal   = {Natural Language Engineering},
  volume    = {27},
  number    = {1},
  pages     = {113--118},
  year      = {2021},
  publisher = {Cambridge University Press}
}
@article{brown2020language,
  title   = {Language Models are Few-Shot Learners},
  author  = {Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and others},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {33},
  pages   = {1877--1901},
  year    = {2020}
}
@article{floridi2020gpt,
  title     = {{GPT}-3: Its Nature, Scope, Limits, and Consequences},
  author    = {Floridi, Luciano and Chiriatti, Massimo},
  journal   = {Minds and Machines},
  volume    = {30},
  number    = {4},
  pages     = {681--694},
  year      = {2020},
  doi       = {10.1007/s11023-020-09548-1},
  publisher = {Springer}
}
@article{rahwan2019machine,
  title     = {Machine Behaviour},
  author    = {Rahwan, Iyad and Cebrian, Manuel and Obradovich, Nick and Bongard, Josh and Bonnefon, Jean-Fran{\c{c}}ois and Breazeal, Cynthia and Crandall, Jacob W and Christakis, Nicholas A and Couzin, Iain D and Jackson, Matthew O and others},
  journal   = {Nature},
  volume    = {568},
  number    = {7753},
  pages     = {477--486},
  year      = {2019},
  doi       = {10.1038/s41586-019-1138-y},
  publisher = {Nature Publishing Group}
}
@article{black2022gpt,
  title         = {{GPT-NeoX-20B}: An Open-Source Autoregressive Language Model},
  author        = {Black, Sid and Biderman, Stella and Hallahan, Eric and Anthony, Quentin and Gao, Leo and Golding, Laurence and He, Horace and Leahy, Connor and McDonell, Kyle and Phang, Jason and others},
  journal       = {arXiv preprint arXiv:2204.06745},
  eprint        = {2204.06745},
  archiveprefix = {arXiv},
  year          = {2022}
}
@article{miotto_who_2022,
  title         = {Who is {GPT}-3? {An} Exploration of Personality, Values and Demographics},
  shorttitle    = {Who is {GPT}-3?},
  author        = {Miotto, Maril{\`u} and Rossberg, Nicola and Kleinberg, Bennett},
  journal       = {arXiv preprint arXiv:2209.14338},
  eprint        = {2209.14338},
  archiveprefix = {arXiv},
  primaryclass  = {cs.CL},
  doi           = {10.48550/arXiv.2209.14338},
  url           = {http://arxiv.org/abs/2209.14338},
  urldate       = {2022-10-03},
  abstract      = {Language models such as GPT-3 have caused a furore in the research community. Some studies found that GPT-3 has some creative abilities and makes mistakes that are on par with human behaviour. This paper answers a related question: who is GPT-3? We administered two validated measurement tools to GPT-3 to assess its personality, the values it holds and its self-reported demographics. Our results show that GPT-3 scores similarly to human samples in terms of personality and - when provided with a model response memory - in terms of the values it holds. We provide the first evidence of psychological assessment of the GPT-3 model and thereby add to our understanding of the GPT-3 model. We close with suggestions for future research that moves social science closer to language models and vice versa.},
  month         = sep,
  year          = {2022}
}
@inproceedings{shihadehbrilliance,
  title     = {Brilliance Bias in {GPT}-3},
  author    = {Shihadeh, Juliana and Ackerman, Margareta and Troske, Ashley and Lawson, Nicole and Gonzalez, Edith},
  booktitle = {2022 IEEE World AI IoT Congress (AIIoT)},
  year      = {2022},
  publisher = {IEEE},
  internal-note = {venue/year reconstructed from the published record -- verify pages and DOI before use}
}