@article{Cohen_Vellambi_Hutter_2020,
  title        = {Asymptotically Unambitious Artificial General Intelligence},
  volume       = {34},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/5628},
  doi          = {10.1609/aaai.v34i03.5628},
  abstractNote = {General intelligence, the ability to solve arbitrary solvable problems, is supposed by many to be artificially constructible. Narrow intelligence, the ability to solve a given particularly difficult problem, has seen impressive recent development. Notable examples include self-driving cars, Go engines, image classifiers, and translators. Artificial General Intelligence (AGI) presents dangers that narrow intelligence does not: if something smarter than us across every domain were indifferent to our concerns, it would be an existential threat to humanity, just as we threaten many species despite no ill will. Even the theory of how to maintain the alignment of an AGI's goals with our own has proven highly elusive. We present the first algorithm we are aware of for asymptotically unambitious AGI, where ``unambitiousness'' includes not seeking arbitrary power. Thus, we identify an exception to the Instrumental Convergence Thesis, which is roughly that by default, an AGI \emph{would} seek power, including over us.},
  number       = {03},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author       = {Cohen, Michael and Vellambi, Badri and Hutter, Marcus},
  year         = {2020},
  month        = {Apr.},
  pages        = {2467--2476}
}