Brundage, Miles; Avin, Shahar; Clark, Jack; Toner, Helen; Eckersley, Peter; Garfinkel, Ben; Dafoe, Allan; Scharre, Paul; Zeitzoff, Thomas; Filar, Bobby; Anderson, Hyrum; Roff, Heather; Allen, Gregory C.; Steinhardt, Jacob; Flynn, Carrick; Ó hÉigeartaigh, Seán; Beard, Simon; Belfield, Haydn; Farquhar, Sebastian; Lyle, Clare; Crootof, Rebecca; Evans, Owain; Page, Michael; Bryson, Joanna; Yampolskiy, Roman; Amodei, Dario
The Malicious Use of Artificial Intelligence: Forecasting, Prevention, and Mitigation Miscellaneous
2018, (arXiv:1802.07228).
@misc{brundage_malicious_2018,
title = {The Malicious Use of Artificial Intelligence: Forecasting, Prevention, and Mitigation},
author = {Miles Brundage and Shahar Avin and Jack Clark and Helen Toner and Peter Eckersley and Ben Garfinkel and Allan Dafoe and Paul Scharre and Thomas Zeitzoff and Bobby Filar and Hyrum Anderson and Heather Roff and Gregory C. Allen and Jacob Steinhardt and Carrick Flynn and Seán Ó hÉigeartaigh and Simon Beard and Haydn Belfield and Sebastian Farquhar and Clare Lyle and Rebecca Crootof and Owain Evans and Michael Page and Joanna Bryson and Roman Yampolskiy and Dario Amodei},
url = {http://arxiv.org/abs/1802.07228},
doi = {10.48550/arXiv.1802.07228},
year = {2018},
date = {2018-02-01},
urldate = {2024-10-22},
publisher = {arXiv},
abstract = {This report surveys the landscape of potential security threats from malicious uses of AI, and proposes ways to better forecast, prevent, and mitigate these threats. After analyzing the ways in which AI may influence the threat landscape in the digital, physical, and political domains, we make four high-level recommendations for AI researchers and other stakeholders. We also suggest several promising areas for further research that could expand the portfolio of defenses, or make attacks less effective or harder to execute. Finally, we discuss, but do not conclusively resolve, the long-term equilibrium of attackers and defenders.},
note = {arXiv:1802.07228},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Crootof, Rebecca
Autonomous Weapon Systems and the Limits of Analogy Journal Article
In: Harvard National Security Journal, vol. 9, no. 2, pp. 51–83, 2018.
@article{crootof_autonomour_2018,
title = {Autonomous Weapon Systems and the Limits of Analogy},
author = {Rebecca Crootof},
url = {https://heinonline.org/HOL/P?h=hein.journals/harvardnsj9&i=277},
year = {2018},
date = {2018-01-01},
urldate = {2024-10-22},
journal = {Harvard National Security Journal},
volume = {9},
number = {2},
pages = {51–83},
abstract = {Autonomous weapon systems are often described either as more independent versions of weapons already in use or as humanoid robotic soldiers. In many ways, these analogies are useful. Analogies and allusions to popular culture make new technologies seem accessible, identify potential dangers, and buttress desired narratives. Most importantly from a legal perspective, analogical reasoning helps stretch existing law to cover developing technologies and minimize law-free zones.
But all potential analogies — weapon, combatant, child soldier, animal combatant — fail to address the legal issues raised by autonomous weapon systems, largely because they all misrepresent legally salient traits. Conceiving of autonomous weapon systems as weapons minimizes their capacity for independent and self-determined action, while the combatant, child soldier, and animal combatant comparisons overemphasize it. Furthermore, these discrete and embodied analogies limit our ability to think imaginatively about this new technology and anticipate how it might develop, thereby impeding our ability to properly regulate it.
We cannot simply graft legal regimes crafted to regulate other entities onto autonomous weapon systems. Instead, as is often the case when analogical reasoning cannot justifiably stretch extant law to answer novel legal questions, new supplemental law is needed. The sooner we escape the confines of these insufficient analogies, the sooner we can create appropriate and effective regulations for autonomous weapon systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Crootof, Rebecca
A Meaningful Floor for “Meaningful Human Control” Journal Article
In: Temple International & Comparative Law Journal, vol. 30, no. 1, pp. 53–62, 2016.
@article{crootof_meaningful_2016,
title = {A Meaningful Floor for “Meaningful Human Control”},
author = {Rebecca Crootof},
url = {https://heinonline.org/HOL/P?h=hein.journals/tclj30&i=61},
year = {2016},
date = {2016-01-01},
urldate = {2024-10-22},
journal = {Temple International & Comparative Law Journal},
volume = {30},
number = {1},
pages = {53–62},
abstract = {The broad support for “meaningful human control” of autonomous weapon systems comes at a familiar legislative cost: there is no consensus as to what this principle requires. This paper describes attempts to define the concept; discusses benefits of retaining imprecision in a standard intended to regulate new technology through international consensus; and argues for an interpretative “floor” grounded on existing humanitarian protections.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Crootof, Rebecca
The Varied Law of Autonomous Weapon Systems Miscellaneous
2015.
@misc{crootof_varied_2015,
title = {The Varied Law of Autonomous Weapon Systems},
author = {Rebecca Crootof},
url = {https://papers.ssrn.com/abstract=2569322},
year = {2015},
date = {2015-02-01},
urldate = {2024-10-22},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {What law governs autonomous weapon systems? Those who have addressed this subject tend to focus on the law of armed conflict and debate whether it is sufficiently flexible to regulate such weaponry. But while it will undoubtedly be one of the more significant sources of states’ legal obligations, the international laws applicable to the development or use of autonomous weapon systems are hardly limited to those rules regarding the conduct of hostilities. Other legal regimes — including international human rights law, the law of the sea, space law, and the law of state responsibility — may also be relevant to how states may lawfully create or employ autonomous weapon systems, resulting in a complex and evolving web of international governance.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Crootof, Rebecca
War Torts: Accountability for Autonomous Weapons Journal Article
In: University of Pennsylvania Law Review, vol. 164, no. 6, pp. 1347–1402, 2016.
@article{crootof_war_2015,
title = {War Torts: Accountability for Autonomous Weapons},
author = {Rebecca Crootof},
url = {https://heinonline.org/HOL/P?h=hein.journals/pnlr164&i=1375},
year = {2016},
date = {2016-01-01},
urldate = {2024-10-22},
journal = {University of Pennsylvania Law Review},
volume = {164},
number = {6},
pages = {1347–1402},
abstract = {Unlike conventional weapons or remotely operated drones, autonomous weapon systems can independently select and engage targets. As a result, they may take actions that look like war crimes — the sinking of a cruise ship, the destruction of a village, the downing of a passenger jet — without any individual acting intentionally or recklessly. Absent such willful action, no one can be held criminally liable under existing international law.
Criminal law aims to prohibit certain actions, and individual criminal liability allows for the evaluation of whether someone is guilty of a moral wrong. Given that a successful ban on autonomous weapon systems is unlikely (and possibly even detrimental), what is needed is a complementary legal regime that holds states accountable for the injurious wrongs that are the side effects of employing these uniquely effective but inherently unpredictable and dangerous weapons. Just as the Industrial Revolution fostered the development of modern tort law, autonomous weapon systems highlight the need for “war torts”: serious violations of international humanitarian law that give rise to state responsibility.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Crootof, Rebecca
The Killer Robots Are Here: Legal and Policy Implications Journal Article
In: Cardozo Law Review, vol. 36, no. 5, pp. 1837–1916, 2015.
@article{crootof_killer_2014,
title = {The Killer Robots Are Here: Legal and Policy Implications},
author = {Rebecca Crootof},
url = {https://heinonline.org/HOL/P?h=hein.journals/cdozo36&i=1943},
year = {2015},
date = {2015-01-01},
urldate = {2024-10-22},
journal = {Cardozo Law Review},
volume = {36},
number = {5},
pages = {1837–1916},
abstract = {In little over a year, the possibility of a complete ban on autonomous weapon systems — known colloquially as “killer robots” — has evolved from a proposal in an NGO report to the subject of an international meeting with representatives from over eighty states. However, no one has yet put forward a coherent definition of autonomy in weapon systems from a law of armed conflict perspective, which often results in the conflation of legal, ethical, policy, and political arguments. To address this problem and to assist future treaty negotiators, this Article proposes that an “autonomous weapon system” be defined as “a weapon system that, based on conclusions derived from gathered information and preprogrammed constraints, is capable of independently selecting and engaging targets.”
Applying this definition, and contrary to the nearly universal consensus, it quickly becomes apparent that autonomous weapon systems are not weapons of the future: they exist and have already been integrated into states’ armed forces. The fact that such weaponry is currently being used with little critique has a number of profound implications. First, it undermines pro-ban arguments based on the premise that autonomous weapon systems are inherently unlawful. Second, it significantly reduces the likelihood that a complete ban would be successful, as states will be unwilling to voluntarily relinquish otherwise lawful and uniquely effective weaponry.
But law is not doomed to follow technology: if used proactively, law can channel the development and use of autonomous weapon systems. The Article concludes that intentional international regulation is needed, now, and suggests how such regulation may be designed to incorporate beneficial legal limitations and humanitarian protections.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Crootof, Rebecca
War, Responsibility, and Killer Robots Journal Article
In: North Carolina Journal of International Law and Commercial Regulation, vol. 40, no. 4, pp. 909–932, 2015.
@article{crootof_war_2014,
title = {War, Responsibility, and Killer Robots},
author = {Rebecca Crootof},
url = {https://heinonline.org/HOL/P?h=hein.journals/ncjint40&i=941},
year = {2015},
date = {2015-01-01},
urldate = {2024-10-22},
journal = {North Carolina Journal of International Law and Commercial Regulation},
volume = {40},
number = {4},
pages = {909–932},
abstract = {Although many are concerned that autonomous weapon systems may make war “too easy,” no one has addressed how their use may alter the distribution of the constitutional war power. Drones, cyber operations, and other technological advances in weaponry already allow the United States to intervene militarily with minimal boots on the ground, and increased autonomy in weapon systems will further reduce risk to soldiers. As human troops are augmented and supplanted by robotic ones, it will be politically easier to justify using force, especially for short-term military engagements. Accordingly, one of the remaining incentives for Congress to check presidential warmongering — popular outrage at the loss of American lives — will diminish. The integration of autonomous weapon systems into U.S. military forces will therefore contribute to the growing concentration of the war power in the hands of the Executive, with implications for the international doctrine of humanitarian intervention.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}