Calo, Ryan; Citron, Danielle Keats
The Automated Administrative State: A Crisis of Legitimacy Journal Article
In: Emory Law Journal, vol. 70, no. 4, pp. 797–846, 2021.
@article{calo_automated_2021,
title = {The Automated Administrative State: A Crisis of Legitimacy},
author = {Ryan Calo and Danielle Keats Citron},
url = {https://scholarlycommons.law.emory.edu/elj/vol70/iss4/1/},
year = {2021},
date = {2021-01-01},
urldate = {2024-10-09},
journal = {Emory Law Journal},
volume = {70},
number = {4},
pages = {797–846},
abstract = {The legitimacy of the administrative state is premised on our faith in agency expertise. Despite their extra-constitutional structure, administrative agencies have been on firm footing for a long time in reverence to their critical role in governing a complex, evolving society. They are delegated enormous power because they respond expertly and nimbly to evolving conditions. In recent decades, state and federal agencies have embraced a novel mode of operation: automation. Agencies rely more and more on software and algorithms in carrying out their delegated responsibilities. The automated administrative state, however, is demonstrably riddled with concerns. Legal challenges regarding the denial of benefits and rights—from travel to disability—have revealed a pernicious pattern of bizarre and unintelligible outcomes. Scholarship to date has explored the pitfalls of automation with a particular frame, asking how we might ensure that automation honors existing legal commitments such as due process. Missing from the conversation are broader, structural critiques of the legitimacy of agencies that automate. Automation abdicates the expertise and nimbleness that justify the administrative state, undermining the very case for the existence and authority of agencies. Yet the answer is not to deny agencies access to technology that other twenty-first century institutions rely upon. This Article points toward a positive vision of the administrative state that adopts tools only when they enhance, rather than undermine, the underpinnings of agency legitimacy.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Coglianese, Cary; Ben Dor, Lavi M.
AI in Adjudication and Administration Journal Article
In: Brooklyn Law Review, vol. 86, no. 3, pp. 791–838, 2020.
@article{coglianese_ai_2020,
title = {AI in Adjudication and Administration},
author = {Cary Coglianese and Lavi M. Ben Dor},
url = {https://brooklynworks.brooklaw.edu/blr/vol86/iss3/1/},
year = {2020},
date = {2020-01-01},
urldate = {2024-10-09},
journal = {Brooklyn Law Review},
volume = {86},
number = {3},
pages = {791–838},
abstract = {The use of artificial intelligence has expanded rapidly in recent years across many aspects of the economy. For federal, state, and local governments in the United States, interest in artificial intelligence has manifested in the use of a series of digital tools, including the occasional deployment of machine learning, to aid in the performance of a variety of governmental functions. In this Article, we canvass the current uses of such digital tools and machine-learning technologies by the judiciary and administrative agencies in the United States. Although we have yet to see fully automated decision-making find its way into either adjudication or administration, governmental entities at all levels are taking steps that could lead to the implementation of automated, machine-learning decision tools in the relatively near future. Within the federal and state court systems, for example, machine-learning tools have yet to be deployed, but other efforts have put in place digital building blocks toward such use. These efforts include the increased digitization of court records that algorithms will need to draw upon for data, the growth of online dispute resolution inside and outside of the courts, and the incorporation of non-learning risk assessment tools as inputs into bail, sentencing, and parole decisions. Administrative agencies have proven much more willing than courts to use machine-learning algorithms, deploying such algorithmic tools to help in the delivery of public services, management of government programs, and targeting of enforcement resources. We discuss already emerging concerns about the deployment of artificial intelligence and related digital tools to support judicial and administrative decision-making. If artificial intelligence is managed responsibly to address such concerns, the use of algorithmic tools by governmental entities throughout the United States would appear to show much future promise. This article’s canvass of current uses of algorithmic tools can serve as a benchmark against which to gauge future growth in the use of artificial intelligence in the public sector.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Engstrom, David Freeman; Ho, Daniel E.
Algorithmic Accountability in the Administrative State Journal Article
In: Yale Journal on Regulation, vol. 37, no. 3, pp. 800–854, 2020.
@article{engstrom_algorithmic_2020,
title = {Algorithmic Accountability in the Administrative State Special Issue: Regulating the Technological Frontier},
author = {David Freeman Engstrom and Daniel E. Ho},
url = {https://openyls.law.yale.edu/handle/20.500.13051/8311},
year = {2020},
date = {2020-01-01},
urldate = {2024-10-09},
journal = {Yale Journal on Regulation},
volume = {37},
number = {3},
pages = {800–854},
abstract = {How will artificial intelligence (AI) transform government? Stemming from a major study commissioned by the Administrative Conference of the United States (ACUS), we highlight the promise and trajectory of algorithmic tools used by federal agencies to perform the work of governance. Moving past the abstract mappings of transparency measures and regulatory mechanisms that pervade the current algorithmic accountability literature, our analysis centers around a detailed technical account of a pair of current applications that exemplify AI’s move to the center of the redistributive and coercive power of the state: the Social Security Administration’s use of AI tools to adjudicate disability benefits cases and the Securities and Exchange Commission’s use of AI tools to target enforcement efforts under federal securities law. We argue that the next generation of work will need to push past a narrow focus on constitutional law and instead engage with the broader terrain of administrative law, which is far more likely to modulate use of algorithmic governance tools going forward.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Leslie, David
Understanding artificial intelligence ethics and safety: A guide for the responsible design and implementation of AI systems in the public sector Technical Report
The Alan Turing Institute, 2019.
@techreport{leslie_understanding_2019,
title = {Understanding artificial intelligence ethics and safety: A guide for the responsible design and implementation of AI systems in the public sector},
author = {David Leslie},
url = {https://zenodo.org/records/3240529},
doi = {10.5281/zenodo.3240529},
year = {2019},
date = {2019-06-01},
urldate = {2024-10-09},
institution = {The Alan Turing Institute},
abstract = {A remarkable time of human promise has been ushered in by the convergence of the ever-expanding availability of big data, the soaring speed and stretch of cloud computing platforms, and the advancement of increasingly sophisticated machine learning algorithms. Innovations in AI are already leaving a mark on government by improving the provision of essential social goods and services from healthcare, education, and transportation to food supply, energy, and environmental management. These bounties are likely just the start. The prospect that progress in AI will help government to confront some of its most urgent challenges is exciting, but legitimate worries abound. As with any new and rapidly evolving technology, a steep learning curve means that mistakes and miscalculations will be made and that both unanticipated and harmful impacts will occur.
This guide, written for department and delivery leads in the UK public sector and adopted by the British Government in its publication, 'Using AI in the Public Sector,' identifies the potential harms caused by AI systems and proposes concrete, operationalisable measures to counteract them. It stresses that public sector organisations can anticipate and prevent these potential harms by stewarding a culture of responsible innovation and by putting in place governance processes that support the design and implementation of ethical, fair, and safe AI systems. It also highlights the need for algorithmically supported outcomes to be interpretable by their users and made understandable to decision subjects in clear, non-technical, and accessible ways. Finally, it builds out a vision of human-centred and context-sensitive implementation that gives a central role to communication, evidence-based reasoning, situational awareness, and moral justifiability.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Re, Richard M.; Solow-Niederman, Alicia
Developing Artificially Intelligent Justice Journal Article
In: Stanford Technology Law Review, vol. 22, no. 2, pp. 242–289, 2019.
@article{re_developing_2019,
title = {Developing Artificially Intelligent Justice},
author = {Richard M. Re and Alicia Solow-Niederman},
url = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3390854},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-09},
journal = {Stanford Technology Law Review},
volume = {22},
number = {2},
pages = {242–289},
abstract = {Artificial intelligence, or AI, promises to assist, modify, and replace human decision-making, including in court. AI already supports many aspects of how judges decide cases, and the prospect of “robot judges” suddenly seems plausible—even imminent. This Article argues that AI adjudication will profoundly affect the adjudicatory values held by legal actors as well as the public at large. The impact is likely to be greatest in areas, including criminal justice and appellate decision-making, where “equitable justice,” or discretionary moral judgment, is frequently considered paramount. By offering efficiency and at least an appearance of impartiality, AI adjudication will both foster and benefit from a turn toward “codified justice,” an adjudicatory paradigm that favors standardization above discretion. Further, AI adjudication will generate a range of concerns relating to its tendency to make the legal system more incomprehensible, data-based, alienating, and disillusioning. And potential responses, such as crafting a division of labor between human and AI adjudicators, each pose their own challenges. The single most promising response is for the government to play a greater role in structuring the emerging market for AI justice, but auspicious reform proposals would borrow several interrelated approaches. Similar dynamics will likely extend to other aspects of government, such that choices about how to incorporate AI in the judiciary will inform the future path of AI development more broadly.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Carrel, Alyson
Legal Intelligence through Artificial Intelligence Requires Emotional Intelligence: A New Competency Model for the 21st Century Legal Professional Journal Article
In: Georgia State University Law Review, vol. 35, no. 4, pp. 1153–1184, 2018.
@article{carrel_legal_2018,
title = {Legal Intelligence through Artificial Intelligence Requires Emotional Intelligence: A New Competency Model for the 21st Century Legal Professional},
author = {Alyson Carrel},
url = {https://readingroom.law.gsu.edu/gsulr/vol35/iss4/4/},
year = {2018},
date = {2018-01-01},
urldate = {2024-10-09},
journal = {Georgia State University Law Review},
volume = {35},
number = {4},
pages = {1153–1184},
abstract = {The nature of legal services is drastically changing given the rise in the use of artificial intelligence and machine learning. Legal education and training models are beginning to recognize the need to incorporate skill building in data and technology platforms, but they have lost sight of a core competency for lawyers: problem-solving and decision-making skills to counsel clients on how best to meet their desired goals and needs. In 2014, Amani Smathers introduced the legal field to the concept of the T-shaped lawyer. The T-shaped lawyer stems from the concept of T-shaped professionals who have a depth of knowledge in their chosen discipline and a breadth of knowledge in other tangentially related disciplines in order to enhance collaboration with other professionals. The concept of a T-shaped lawyer recognizes that lawyers not only need in-depth legal knowledge and skills but an understanding of data, technology, project management, and process improvement to be competent legal professionals. The T-shaped lawyer brought attention to the need for lawyers and law students to expand their training and begin learning about artificial intelligence, design thinking, data analytics, and more. However, the T-shaped lawyer obscures the core competency of decision-making and problem-solving. This paper introduces the Delta Model for legal professional competency that not only recognizes the need for lawyers to have deep legal knowledge and skills, as well as an understanding of data and technology, but also recognizes the need for emotional intelligence in decision-making and problem-solving. The Delta Model could reshape how we approach legal education and lawyer training, providing a structure that more accurately reflects the breadth of skills that a twenty-first-century lawyer needs.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Katyal, Sonia K.
The Paradox of Source Code Secrecy Journal Article
In: Cornell Law Review, vol. 104, no. 5, pp. 1183–1280, 2018.
@article{katyal_paradox_2018,
title = {The Paradox of Source Code Secrecy},
author = {Sonia K. Katyal},
url = {https://scholarship.law.cornell.edu/clr/vol104/iss5/2/},
year = {2018},
date = {2018-01-01},
urldate = {2024-10-09},
journal = {Cornell Law Review},
volume = {104},
number = {5},
pages = {1183–1280},
abstract = {In Lear v. Adkins, the Supreme Court precipitously wrote, "federal law requires that all ideas in general circulation be dedicated to the common good unless they are protected by a valid patent." Today, it is clear that trade secrecy's dominance over source code has been a significant cause for concern in cases involving the public interest. To protect civil rights in the age of automated decision making, I argue, we must limit opportunities for seclusion in areas of intellectual property, criminal justice, and governance more generally. The solution, therefore, does not require a complete overhaul of the existing system, but rather a more nuanced, granular approach that seeks to balance the interest of disclosure and public access with the substantial values of protection, privacy, and property.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Volokh, Eugene
Chief Justice Robots Journal Article
In: Duke Law Journal, vol. 68, no. 6, pp. 1135–1192, 2018.
@article{volokh_chief_2018,
title = {Chief Justice Robots},
author = {Eugene Volokh},
url = {https://scholarship.law.duke.edu/dlj/vol68/iss6/2/},
year = {2018},
date = {2018-01-01},
urldate = {2024-10-09},
journal = {Duke Law Journal},
volume = {68},
number = {6},
pages = {1135–1192},
abstract = {Say an AI program someday passes a Turing test, because it can converse in a way indistinguishable from a human. And say that its developers can then teach it to converse—and even present an extended persuasive argument—in a way indistinguishable from the sort of human we call a “lawyer.” The program could thus become an AI brief-writer, capable of regularly winning brief-writing competitions against human lawyers.
Once that happens (if it ever happens), this Essay argues, the same technology can be used to create AI judges, judges that we should accept as no less reliable (and more cost-effective) than human judges. If the software can create persuasive opinions, capable of regularly winning opinion-writing competitions against human judges—and if it can be adequately protected against hacking and similar attacks—we should in principle accept it as a judge, even if the opinions do not stem from human judgment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cohen, Julie E.
Information Privacy Litigation as Bellwether for Institutional Change Journal Article
In: DePaul Law Review, vol. 66, no. 2, pp. 535–578, 2016.
@article{cohen_information_2016,
title = {Information Privacy Litigation as Bellwether for Institutional Change Symposium - Privacy, Data Theft and Corporate Responsibility: Twenty-Second Annual Clifford Symposium on Tort Law and Social Policy},
author = {Julie E. Cohen},
url = {https://via.library.depaul.edu/law-review/vol66/iss2/9/},
year = {2016},
date = {2016-01-01},
urldate = {2024-10-09},
journal = {DePaul Law Review},
volume = {66},
number = {2},
pages = {535–578},
abstract = {Information privacy litigation is controversial and headline-grabbing. New class complaints are filed seemingly every few weeks. Legal scholars vie with one another to articulate more comprehensive theories of harm that such lawsuits might vindicate. Large information businesses and defense counsel bemoan the threats that information privacy litigation poses to corporate bottom lines and to “innovation” more generally. For all that, though, the track record of litigation achievements on the information privacy front is stunningly poor. This essay examines emerging conventions for disposing of information privacy claims, including denial of standing, enforcement of boilerplate waivers, denial of class certification, and the rise of the cy pres settlement. It argues that, in an era of complex, informationally-mediated harms, the information privacy lawsuit is a marker of both institutional stress and institutional opportunity. The inability of most information privacy claims to gain meaningful traction reflects the influence of powerful repeat players interested in minimizing their exposure to claims of informational injury. But it also raises important questions about how judicial processes can be adapted to deal with the predominantly informational and infrastructural harms that increasingly characterize our networked, information-based political economy.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Coglianese, Cary; Lehr, David
Regulating by Robot: Administrative Decision Making in the Machine-Learning Era Journal Article
In: Georgetown Law Journal, vol. 105, no. 5, pp. 1147–1224, 2016.
@article{coglianese_regulating_2016,
title = {Regulating by Robot: Administrative Decision Making in the Machine-Learning Era},
author = {Cary Coglianese and David Lehr},
url = {https://scholarship.law.upenn.edu/faculty_scholarship/1734/},
year = {2016},
date = {2016-01-01},
urldate = {2024-10-09},
journal = {Georgetown Law Journal},
volume = {105},
number = {5},
pages = {1147–1224},
abstract = {Machine-learning algorithms are transforming large segments of the economy as they fuel innovation in search engines, self-driving cars, product marketing, and medical imaging, among many other technologies. As machine learning’s use expands across all facets of society, anxiety has emerged about the intrusion of algorithmic machines into facets of life previously dependent on human judgment. Alarm bells sounding over the diffusion of artificial intelligence throughout the private sector only portend greater anxiety about digital robots replacing humans in the governmental sphere. A few administrative agencies have already begun to adopt this technology, while others have clear potential in the near term to use algorithms to shape official decisions over both rulemaking and adjudication. It is no longer fanciful to envision a future in which government agencies could effectively make law by robot, a prospect that understandably conjures up dystopian images of individuals surrendering their liberty to the control of computerized overlords. Should society be alarmed by governmental use of machine-learning applications? We examine this question by considering whether the use of robotic decision tools by government agencies can pass muster under core, time-honored doctrines of administrative and constitutional law. At first glance, the idea of algorithmic regulation might appear to offend one or more traditional doctrines, such as the nondelegation doctrine, procedural due process, equal protection, or principles of reason-giving and transparency. We conclude, however, that when machine-learning technology is properly understood, its use by government agencies can comfortably fit within these conventional legal parameters. We recognize, of course, that the legality of regulation by robot is only one criterion by which its use should be assessed. Agencies should not apply algorithms cavalierly, even if doing so might not run afoul of the law; in some cases, safeguards may be needed for machine learning to satisfy broader, good-governance aspirations. Yet, in contrast with the emerging alarmism, we resist any categorical dismissal of a future administrative state in which algorithmic automation guides, and even at times makes, key decisions. Instead, we urge that governmental reliance on machine learning should be approached with measured optimism about the potential benefits such technology can offer society by making government smarter and its decisions more efficient and just.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Surden, Harry
Machine Learning and Law Journal Article
In: Washington Law Review, vol. 89, no. 1, pp. 87–116, 2014.
@article{surden_machine_2014,
title = {Machine Learning and Law},
author = {Harry Surden},
url = {https://digitalcommons.law.uw.edu/wlr/vol89/iss1/5/},
year = {2014},
date = {2014-01-01},
urldate = {2024-10-09},
journal = {Washington Law Review},
volume = {89},
number = {1},
pages = {87–116},
abstract = {This Article explores the application of machine learning techniques within the practice of law. Broadly speaking “machine learning” refers to computer algorithms that have the ability to “learn” or improve in performance over time on some task. In general, machine learning algorithms are designed to detect patterns in data and then apply these patterns going forward to new data in order to automate particular tasks. Outside of law, machine learning techniques have been successfully applied to automate tasks that were once thought to necessitate human intelligence — for example language translation, fraud-detection, driving automobiles, facial recognition, and data-mining. If performing well, machine learning algorithms can produce automated results that approximate those that would have been made by a similarly situated person.
This Article begins by explaining some basic principles underlying machine learning methods, in a manner accessible to non-technical audiences. The second part explores a broader puzzle: legal practice is thought to require advanced cognitive abilities, but such higher-order cognition remains outside the capability of current machine-learning technology. This part identifies a core principle: how certain tasks that are normally thought to require human intelligence can sometimes be automated through the use of non-intelligent computational techniques that employ heuristics or proxies (e.g., statistical correlations) capable of producing useful, “intelligent” results. The third part applies this principle to the practice of law, discussing machine-learning automation in the context of certain legal tasks currently performed by attorneys: including predicting the outcomes of legal cases, finding hidden relationships in legal documents and data, electronic discovery, and the automated organization of documents.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ashley, Kevin; Branting, Karl; Margolis, Howard; Sunstein, Cass
Legal Reasoning and Artificial Intelligence: How Computers "Think" Like Lawyers Journal Article
In: The University of Chicago Law School Roundtable, vol. 8, no. 1, 2001, ISSN: 1075-9166.
@article{ashley_legal_2001,
title = {Legal Reasoning and Artificial Intelligence: How Computers "Think" Like Lawyers},
author = {Kevin Ashley and Karl Branting and Howard Margolis and Cass Sunstein},
url = {https://chicagounbound.uchicago.edu/roundtable/vol8/iss1/2},
issn = {1075-9166},
year = {2001},
date = {2001-01-01},
journal = {The University of Chicago Law School Roundtable},
volume = {8},
number = {1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}