@article{Goertzel:2012:1355-8250:96,
  author          = {Goertzel, Ben},
  title           = {Should Humanity Build a Global {AI} Nanny to Delay the {Singularity} Until It's Better Understood?},
  journal         = {Journal of Consciousness Studies},
  year            = {2012},
  volume          = {19},
  number          = {1-2},
  pages           = {96--111},
  issn            = {1355-8250},
  url             = {https://www.ingentaconnect.com/content/imp/jcs/2012/00000019/f0020001/art00006},
  abstract        = {Chalmers suggests that, if a Singularity fails to occur in the next few centuries, the most likely reason will be 'motivational defeaters' i.e. at some point humanity or human-level AI may abandon the effort to create dramatically superhuman artificial general intelligence. Here I explore one (I argue) plausible way in which that might happen: the deliberate human creation of an 'AI Nanny' with mildly superhuman intelligence and surveillance powers, designed either to forestall Singularity eternally, or to delay the Singularity until humanity more fully understands how to execute a Singularity in a positive way. It is suggested that as technology progresses, humanity may find the creation of an AI Nanny desirable as a means of protecting against the destructive potential of various advanced technologies such as AI, nanotechnology and synthetic biology.},
  parent_itemid   = {infobike://imp/jcs},
  publishercode   = {imp},
  itemtype        = {ARTICLE},
  publicationdate = {2012-01-01T00:00:00},
}