Chang, Cheng-chi
When AI Remembers Too Much: Reinventing the Right to Be Forgotten for the Generative Age Journal Article
In: Washington Journal of Law, Technology & Arts, vol. 19, no. 3, 2024, ISSN: 2157-2534.
@article{chang_when_2024,
title = {When AI Remembers Too Much: Reinventing the Right to Be Forgotten for the Generative Age},
author = {Cheng-chi Chang},
url = {https://digitalcommons.law.uw.edu/wjlta/vol19/iss3/2},
issn = {2157-2534},
year = {2024},
date = {2024-06-01},
journal = {Washington Journal of Law, Technology & Arts},
volume = {19},
number = {3},
abstract = {The emergence of generative artificial intelligence (AI) systems poses novel challenges for the right to be forgotten. While this right gained prominence following the 2014 Google Spain v. Gonzalez case, generative AI’s limitless memory and ability to reproduce identifiable data from fragments threaten traditional conceptions of forgetting. This Article traces the evolution of the right to be forgotten from its privacy law origins towards an independent entitlement grounded in self-determination for personal information. However, it contends the inherent limitations of using current anonymization, deletion, and geographical blocking mechanisms to prevent AI models from retaining personal data render forgetting infeasible. Moreover, the technical costs of forgetting—including tracking derivations and retraining models—could undermine enforceability. Therefore, this article advocates for a balanced legal approach that acknowledges the value of the right to forget while considering the constraints of implementing the right for generative AI. Although existing frameworks like the European Union’s GDPR provide a foundation, continuous regulatory evolution through oversight bodies and industry collaboration is imperative. This article underscores how the right to be forgotten must be reconceptualized to address the reality of generative AI systems. It provides an interdisciplinary analysis of this right’s limitations and proposes strategies to reconcile human dignity and autonomy with the emerging technological realities of AI. This Article’s original contribution lies in its nuanced approach to integrating legal and technical dimensions to develop adaptive frameworks for the right to be forgotten in the age of generative AI.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Adams, Zoe; Adams-Prassl, Abi; Adams-Prassl, Jeremias
Online tribunal judgments and the limits of open justice Journal Article
In: Legal Studies, vol. 42, no. 1, pp. 42–60, 2022, ISSN: 0261-3875, 1748-121X.
@article{adams_online_2022,
title = {Online tribunal judgments and the limits of open justice},
author = {Zoe Adams and Abi Adams-Prassl and Jeremias Adams-Prassl},
url = {https://www.cambridge.org/core/journals/legal-studies/article/abs/online-tribunal-judgments-and-the-limits-of-open-justice/4B4BDF453875CCA1769129027686D6AE},
doi = {10.1017/lst.2021.30},
issn = {0261-3875, 1748-121X},
year = {2022},
date = {2022-03-01},
urldate = {2024-10-22},
journal = {Legal Studies},
volume = {42},
number = {1},
pages = {42–60},
abstract = {The principle of open justice is a constituent element of the rule of law: it demands publicity of legal proceedings, including the publication of judgments. Since 2017, the UK government has systematically published first instance Employment Tribunal decisions in an online repository. Whilst a veritable treasure trove for researchers and policy makers, the database also has darker potential – from automating blacklisting to creating new and systemic barriers to access to justice. Our scrutiny of existing legal safeguards, from anonymity orders to equality law and data protection, finds a number of gaps, which threaten to make the principle of open justice as embodied in the current publication regime inimical to equal access to justice.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kaminski, Margot E.; Urban, Jennifer M.
The Right to Contest AI Journal Article
In: Columbia Law Review, vol. 121, no. 7, pp. 1957–2048, 2021.
@article{kaminski_right_2021,
title = {The Right to Contest AI},
author = {Margot E. Kaminski and Jennifer M. Urban},
url = {https://heinonline.org/HOL/P?h=hein.journals/clr121&i=2031},
year = {2021},
date = {2021-01-01},
urldate = {2024-10-22},
journal = {Columbia Law Review},
volume = {121},
number = {7},
pages = {1957–2048},
abstract = {Artificial intelligence (AI) is increasingly used to make important decisions, from university admissions selections to loan determinations to the distribution of COVID-19 vaccines. These uses of AI raise a host of concerns about discrimination, accuracy, fairness, and accountability.
In the United States, recent proposals for regulating AI focus largely on ex ante and systemic governance. This Article argues instead—or really, in addition—for an individual right to contest AI decisions, modeled on due process but adapted for the digital age. The European Union, in fact, recognizes such a right, and a growing number of institutions around the world now call for its establishment. This Article argues that despite considerable differences between the United States and other countries, establishing the right to contest AI decisions here would be in keeping with a long tradition of due process theory.
This Article then fills a gap in the literature, establishing a theoretical scaffolding for discussing what a right to contest should look like in practice. This Article establishes four contestation archetypes that should serve as the bases of discussions of contestation both for the right to contest AI and in other policy contexts. The contestation archetypes vary along two axes: from contestation rules to standards and from emphasizing procedure to establishing substantive rights. This Article then discusses four processes that illustrate these archetypes in practice, including the first in depth consideration of the GDPR’s right to contestation for a U.S. audience. Finally, this Article integrates findings from these investigations to develop normative and practical guidance for establishing a right to contest AI.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fu, Runshan; Huang, Yan; Singh, Param Vir
Artificial Intelligence and Algorithmic Bias: Source, Detection, Mitigation, and Implications Book Section
In: Pushing the Boundaries: Frontiers in Impactful OR/OM Research, pp. 39–63, INFORMS, 2020, ISBN: 978-0-9906153-4-7, (Section: 2).
@incollection{fu_artificial_2020,
title = {Artificial Intelligence and Algorithmic Bias: Source, Detection, Mitigation, and Implications},
author = {Runshan Fu and Yan Huang and Param Vir Singh},
url = {https://pubsonline.informs.org/doi/10.1287/educ.2020.0215},
doi = {10.1287/educ.2020.0215},
isbn = {978-0-9906153-4-7},
year = {2020},
date = {2020-11-01},
urldate = {2024-10-22},
booktitle = {Pushing the Boundaries: Frontiers in Impactful OR/OM Research},
pages = {39–63},
publisher = {INFORMS},
series = {INFORMS TutORials in Operations Research},
abstract = {Artificial intelligence (AI) and machine learning (ML) algorithms are widely used throughout our economy in making decisions that have far-reaching impacts on employment, education, access to credit, and other areas. Initially considered neutral and fair, ML algorithms have recently been found increasingly biased, creating and perpetuating structural inequalities in society. With the rising concerns about algorithmic bias, a growing body of literature attempts to understand and resolve the issue of algorithmic bias. In this tutorial, we discuss five important aspects of algorithmic bias. We start with its definition and the notions of fairness policy makers, practitioners, and academic researchers have used and proposed. Next, we note the challenges in identifying and detecting algorithmic bias given the observed decision outcome, and we describe methods for bias detection. We then explain the potential sources of algorithmic bias and review several bias-correction methods. Finally, we discuss how agents’ strategic behavior may lead to biased societal outcomes, even when the algorithm itself is unbiased. We conclude by discussing open questions and future research directions.},
note = {Section: 2},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Hartzog, Woodrow; Richards, Neil
Privacy's Constitutional Moment and the Limits of Data Protection Journal Article
In: Boston College Law Review, vol. 61, no. 5, pp. 1687–1762, 2020.
@article{hartzog_privacys_2020,
title = {Privacy's Constitutional Moment and the Limits of Data Protection},
author = {Woodrow Hartzog and Neil Richards},
url = {https://heinonline.org/HOL/P?h=hein.journals/bclr61&i=1713},
year = {2020},
date = {2020-01-01},
urldate = {2024-10-22},
journal = {Boston College Law Review},
volume = {61},
number = {5},
pages = {1687–1762},
abstract = {America’s privacy bill has come due. Since the dawn of the Internet, Congress has repeatedly failed to build a robust identity for American privacy law. But now both California and the European Union have forced Congress’s hand by passing the California Consumer Privacy Act (CCPA) and the General Data Protection Regulation (GDPR). These data protection frameworks, structured around principles for Fair Information Processing called the “FIPs,” have industry and privacy advocates alike clamoring for a “U.S. GDPR.” States seemed poised to blanket the country with FIP-based laws if Congress fails to act. The United States is thus in the midst of a “constitutional moment” for privacy, in which intense public deliberation and action may bring about constitutive and structural change. And the European data protection model of the GDPR is ascendant.
In this article we highlight the risks of U.S. lawmakers embracing a watered-down version of the European model as American privacy law enters its constitutional moment. European-style data protection rules have undeniable virtues, but they won’t be enough. The FIPs assume data processing is always a worthy goal, but even fairly processed data can lead to oppression and abuse. Data protection is also myopic because it ignores how industry’s appetite for data is wrecking our environment, our democracy, our attention spans, and our emotional health. Even if E.U.-style data protection were sufficient, the United States is too different from Europe to implement and enforce such a framework effectively on its European law terms. Any U.S. GDPR would in practice be what we call a “GDPR-Lite.”
Our argument is simple: In the United States, a data protection model cannot do it all for privacy, though if current trends continue, we will likely entrench it as though it can. Drawing from constitutional theory and the traditions of privacy regulation in the United States, we propose instead a “comprehensive approach” to privacy that is better focused on power asymmetries, corporate structures, and a broader vision of human well-being. Settling for an American GDPR-lite would be a tragic ending to a real opportunity to tackle the critical problems of the information age. In this constitutional moment for privacy, we can and should demand more. This article offers a path forward to do just that.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zittrain, Jonathan L.
Three Eras of Digital Governance Miscellaneous
2019.
@misc{zittrain_three_2019,
title = {Three Eras of Digital Governance},
author = {Jonathan L. Zittrain},
url = {https://papers.ssrn.com/abstract=3458435},
doi = {10.2139/ssrn.3458435},
year = {2019},
date = {2019-09-01},
urldate = {2024-10-22},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {To understand where digital governance is going, we must take stock of where it’s been, because the timbre of mainstream thinking around digital governance today is dramatically different than it was when study of “Internet governance” coalesced in the late 1990s. Perhaps the most obvious change has been from emphasizing networked technologies’ positive effects and promise – couched around concepts like connectivity, innovation, and, by this author, “generativity” – to pointing out their harms and threats. It’s not that threats weren’t previously recognized, but rather that they were more often seen in external clamps on technological development and upon the corresponding new freedoms for users, whether government intervention to block VOIP services like Skype to protect incumbent telco revenues, or in the shaping of technology to effect undue surveillance, whether for government or corporate purposes. The shift in emphasis from positive to negative corresponds to a change in the overarching frameworks for talking about regulating information technology. We have moved from a discourse around rights – particularly those of end-users, and the ways in which abstention by intermediaries is important to facilitate citizen flourishing – to one of public health, which naturally asks for a weighing of the systemic benefits or harms of a technology, and to think about what systemic interventions might curtail its apparent excesses. Each framework captures important values around the use of technology that can both empower and limit individual freedom of action, including to engage in harmful conduct. Our goal today should be to identify where competing values frameworks themselves preclude understanding of others’ positions about regulation, and to see if we can map a path forward that, if not reconciling the frameworks, allows for satisfying, if ever-evolving, resolutions to immediate questions of public and private governance.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Buiten, Miriam C.
Towards Intelligent Regulation of Artificial Intelligence Journal Article
In: European Journal of Risk Regulation, vol. 10, no. 1, pp. 41–59, 2019, ISSN: 1867-299X, 2190-8249.
@article{buiten_towards_2019,
title = {Towards Intelligent Regulation of Artificial Intelligence},
author = {Miriam C. Buiten},
url = {https://www.cambridge.org/core/journals/european-journal-of-risk-regulation/article/towards-intelligent-regulation-of-artificial-intelligence/AF1AD1940B70DB88D2B24202EE933F1B},
doi = {10.1017/err.2019.8},
issn = {1867-299X, 2190-8249},
year = {2019},
date = {2019-03-01},
urldate = {2024-10-22},
journal = {European Journal of Risk Regulation},
volume = {10},
number = {1},
pages = {41–59},
abstract = {Artificial intelligence (AI) is becoming a part of our daily lives at a fast pace, offering myriad benefits for society. At the same time, there is concern about the unpredictability and uncontrollability of AI. In response, legislators and scholars call for more transparency and explainability of AI. This article considers what it would mean to require transparency of AI. It advocates looking beyond the opaque concept of AI, focusing on the concrete risks and biases of its underlying technology: machine-learning algorithms. The article discusses the biases that algorithms may produce through the input data, the testing of the algorithm and the decision model. Any transparency requirement for algorithms should result in explanations of these biases that are both understandable for the prospective recipients, and technically feasible for producers. Before asking how much transparency the law should require from algorithms, we should therefore consider if the explanation that programmers could offer is useful in specific legal contexts.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Katyal, Sonia K.
Private Accountability in the Age of Artificial Intelligence Journal Article
In: UCLA Law Review, vol. 66, no. 1, pp. 54–141, 2019.
@article{katyal_private_2019,
title = {Private Accountability in the Age of Artificial Intelligence},
author = {Sonia K. Katyal},
url = {https://heinonline.org/HOL/P?h=hein.journals/uclalr66&i=64},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-22},
journal = {UCLA Law Review},
volume = {66},
number = {1},
pages = {54–141},
abstract = {In this Article, I explore the impending conflict between the protection of civil rights and artificial intelligence (AI). While both areas of law have amassed rich and well-developed areas of scholarly work and doctrinal support, a growing body of scholars are interrogating the intersection between them. This Article argues that the issues surrounding algorithmic accountability demonstrate a deeper, more structural tension within a new generation of disputes regarding law and technology. As I argue, the true promise of AI does not lie in the information we reveal to one another, but rather in the questions it raises about the interaction of technology, property, and civil rights.
For this reason, I argue that we are looking in the wrong place if we look only to the state to address issues of algorithmic accountability. Instead, we must turn to other ways to ensure more transparency and accountability that stem from private industry, rather than public regulation. The issue of algorithmic bias represents a crucial new world of civil rights concerns, one that is distinct in nature from the ones that preceded it. Since we are in a world where the activities of private corporations, rather than the state, are raising concerns about privacy, due process, and discrimination, we must focus on the role of private corporations in addressing the issue. Towards this end, I discuss a variety of tools to help eliminate the opacity of AI, including codes of conduct, impact statements, and whistleblower protection, which I argue carries the potential to encourage greater endogeneity in civil rights enforcement. Ultimately, by examining the relationship between private industry and civil rights, we can perhaps develop a new generation of forms of accountability in the process.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cohen, Julie E.
Internet Utopianism and the Practical Inevitability of Law (The Past and Future of the Internet: A Symposium for John Perry Barlow) Journal Article
In: Duke Law & Technology Review, vol. 18, pp. 85–96, 2019.
@article{cohen_internet_2019,
title = {Internet Utopianism and the Practical Inevitability of Law The Past and Future of the Internet: A Symposium for John Perry Barlow},
author = {Julie E. Cohen},
url = {https://heinonline.org/HOL/P?h=hein.journals/dltr18&i=85},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-22},
journal = {Duke Law & Technology Review},
volume = {18},
pages = {85–96},
abstract = {Writing at the dawn of the digital era, John Perry Barlow proclaimed cyberspace to be a new domain of pure freedom. Addressing the nations of the world, he cautioned that their laws, which were “based on matter,” simply did not speak to conduct in the new virtual realm. As both Barlow and the cyberlaw scholars who took up his call recognized, that was not so much a statement of fact as it was an exercise in deliberate utopianism. But it has proved prescient in a way that they certainly did not intend. The “laws” that increasingly have no meaning in online environments include not only the mandates of market regulators but also the guarantees that supposedly protect the fundamental rights of internet users, including the expressive and associational freedoms whose supremacy Barlow asserted. More generally, in the networked information era, protections for fundamental human rights — both on- and offline — have begun to fail comprehensively.
Cyberlaw scholarship in the Barlowian mold isn’t to blame for the worldwide erosion of protections for fundamental rights, but it also hasn’t helped as much as it might have. In this essay, adapted from a forthcoming book on the evolution of legal institutions in the information era, I identify and briefly examine three intersecting flavors of internet utopianism in cyberlegal thought that are worth reexamining. It has become increasingly apparent that functioning legal institutions have indispensable roles to play in protecting and advancing human freedom. It has also become increasingly apparent, however, that the legal institutions we need are different than the ones we have.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wachter, Sandra; Mittelstadt, Brent
A Right to Reasonable Inferences: Re-Thinking Data Protection Law in the Age of Big Data and AI (Survey: Privacy, Data, and Business) Journal Article
In: Columbia Business Law Review, vol. 2019, no. 2, pp. 494–620, 2019.
@article{wachter_right_2019,
title = {A Right to Reasonable Inferences: Re-Thinking Data Protection Law in the Age of Big Data and AI Survey: Privacy, Data, and Business},
author = {Sandra Wachter and Brent Mittelstadt},
url = {https://heinonline.org/HOL/P?h=hein.journals/colb2019&i=506},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-22},
journal = {Columbia Business Law Review},
volume = {2019},
number = {2},
pages = {494–620},
abstract = {Big Data analytics and artificial intelligence (AI) draw non-intuitive and unverifiable inferences and predictions about the behaviors, preferences, and private lives of individuals. These inferences draw on highly diverse and feature-rich data of unpredictable value, and create new opportunities for discriminatory, biased, and invasive decision-making. Data protection law is meant to protect people’s privacy, identity, reputation, and autonomy, but is currently failing to protect data subjects from the novel risks of inferential analytics. The legal status of inferences is heavily disputed in legal scholarship, and marked by inconsistencies and contradictions within and between the views of the Article 29 Working Party and the European Court of Justice (ECJ).
This Article shows that individuals are granted little control and oversight over how their personal data is used to draw inferences about them. Compared to other types of personal data, inferences are effectively ‘economy class’ personal data in the General Data Protection Regulation (GDPR). Data subjects’ rights to know about (Art 13-15), rectify (Art 16), delete (Art 17), object to (Art 21), or port (Art 20) personal data are significantly curtailed for inferences. The GDPR also provides insufficient protection against sensitive inferences (Art 9) or remedies to challenge inferences or important decisions based on them (Art 22(3)).
This situation is not accidental. In standing jurisprudence the ECJ has consistently restricted the remit of data protection law to assessing the legitimacy of input personal data undergoing processing, and to rectify, block, or erase it. Critically, the ECJ has likewise made clear that data protection law is not intended to ensure the accuracy of decisions and decision-making processes involving personal data, or to make these processes fully transparent. Current policy proposals addressing privacy protection (the ePrivacy Regulation and the EU Digital Content Directive) and Europe’s new Copyright Directive and Trade Secrets Directive also fail to close the GDPR’s accountability gaps concerning inferences.
This Article argues that a new data protection right, the ‘right to reasonable inferences’, is needed to help close the accountability gap currently posed by ‘high risk inferences’ , meaning inferences drawn from Big Data analytics that damage privacy or reputation, or have low verifiability in the sense of being predictive or opinion-based while being used in important decisions. This right would require ex-ante justification to be given by the data controller to establish whether an inference is reasonable. This disclosure would address (1) why certain data form a normatively acceptable basis from which to draw inferences; (2) why these inferences are relevant and normatively acceptable for the chosen processing purpose or type of automated decision; and (3) whether the data and methods used to draw the inferences are accurate and statistically reliable. The ex-ante justification is bolstered by an additional ex-post mechanism enabling unreasonable inferences to be challenged.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cohen, Julie E.
The Biopolitical Public Domain: the Legal Construction of the Surveillance Economy Journal Article
In: Philosophy & Technology, vol. 31, no. 2, pp. 213–233, 2018, ISSN: 2210-5441.
@article{cohen_biopolitical_2018,
title = {The Biopolitical Public Domain: the Legal Construction of the Surveillance Economy},
author = {Julie E. Cohen},
url = {https://doi.org/10.1007/s13347-017-0258-2},
doi = {10.1007/s13347-017-0258-2},
issn = {2210-5441},
year = {2018},
date = {2018-06-01},
urldate = {2024-10-22},
journal = {Philosophy & Technology},
volume = {31},
number = {2},
pages = {213–233},
abstract = {Within the political economy of informational capitalism, commercial surveillance practices are tools for resource extraction. That process requires an enabling legal construct, which this essay identifies and explores. Contemporary practices of personal information processing constitute a new type of public domain—a repository of raw materials that are there for the taking and that are framed as inputs to particular types of productive activity. As a legal construct, the biopolitical public domain shapes practices of appropriation and use of personal information in two complementary and interrelated ways. First, it constitutes personal information as available and potentially valuable: as a pool of materials that may be freely appropriated as inputs to economic production. That framing supports the reorganization of sociotechnical activity in ways directed toward extraction and appropriation. Second, the biopolitical public domain constitutes the personal information harvested within networked information environments as raw. That framing creates the backdrop for culturally situated techniques of knowledge production and for the logic that designates those techniques as sites of legal privilege.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Villaronga, Eduard Fosch; Kieseberg, Peter; Li, Tiffany
Humans forget, machines remember: Artificial intelligence and the Right to Be Forgotten Journal Article
In: Computer Law & Security Review, vol. 34, no. 2, pp. 304–313, 2018, ISSN: 0267-3649.
@article{villaronga_humans_2018,
title = {Humans forget, machines remember: Artificial intelligence and the Right to Be Forgotten},
author = {Eduard Fosch Villaronga and Peter Kieseberg and Tiffany Li},
url = {https://www.sciencedirect.com/science/article/pii/S0267364917302091},
doi = {10.1016/j.clsr.2017.08.007},
issn = {0267-3649},
year = {2018},
date = {2018-04-01},
urldate = {2024-10-22},
journal = {Computer Law & Security Review},
volume = {34},
number = {2},
pages = {304–313},
abstract = {This article examines the problem of AI memory and the Right to Be Forgotten. First, this article analyzes the legal background behind the Right to Be Forgotten, in order to understand its potential applicability to AI, including a discussion on the antagonism between the values of privacy and transparency under current E.U. privacy law. Next, the authors explore whether the Right to Be Forgotten is practicable or beneficial in an AI/machine learning context, in order to understand whether and how the law should address the Right to Be Forgotten in a post-AI world. The authors discuss the technical problems faced when adhering to strict interpretation of data deletion requirements under the Right to Be Forgotten, ultimately concluding that it may be impossible to fulfill the legal aims of the Right to Be Forgotten in artificial intelligence environments. Finally, this article addresses the core issue at the heart of the AI and Right to Be Forgotten problem: the unfortunate dearth of interdisciplinary scholarship supporting privacy law and regulation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yeung, Karen
Algorithmic regulation: A critical interrogation Journal Article
In: Regulation & Governance, vol. 12, no. 4, pp. 505–523, 2018, ISSN: 1748-5991.
@article{yeung_algorithmic_2018,
title = {Algorithmic regulation: A critical interrogation},
author = {Karen Yeung},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/rego.12158},
doi = {10.1111/rego.12158},
issn = {1748-5991},
year = {2018},
date = {2018-01-01},
urldate = {2024-10-22},
journal = {Regulation & Governance},
volume = {12},
number = {4},
pages = {505–523},
abstract = {Innovations in networked digital communications technologies, including the rise of “Big Data,” ubiquitous computing, and cloud storage systems, may be giving rise to a new system of social ordering known as algorithmic regulation. Algorithmic regulation refers to decisionmaking systems that regulate a domain of activity in order to manage risk or alter behavior through continual computational generation of knowledge by systematically collecting data (in real time on a continuous basis) emitted directly from numerous dynamic components pertaining to the regulated environment in order to identify and, if necessary, automatically refine (or prompt refinement of) the system's operations to attain a pre-specified goal. This study provides a descriptive analysis of algorithmic regulation, classifying these decisionmaking systems as either reactive or pre-emptive, and offers a taxonomy that identifies eight different forms of algorithmic regulation based on their configuration at each of the three stages of the cybernetic process: notably, at the level of standard setting (adaptive vs. fixed behavioral standards), information-gathering and monitoring (historic data vs. predictions based on inferred data), and at the level of sanction and behavioral change (automatic execution vs. recommender systems). It maps the contours of several emerging debates surrounding algorithmic regulation, drawing upon insights from regulatory governance studies, legal critiques, surveillance studies, and critical data studies to highlight various concerns about the legitimacy of algorithmic regulation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kaminski, Margot E.
Binary Governance: Lessons from the GDPR's Approach to Algorithmic Accountability Journal Article
In: Southern California Law Review, vol. 92, no. 6, pp. 1529–1616, 2018.
@article{kaminski_binary_2018,
title = {Binary Governance: Lessons from the GDPR's Approach to Algorithmic Accountability},
author = {Margot E. Kaminski},
url = {https://heinonline.org/HOL/P?h=hein.journals/scal92&i=1624},
year = {2018},
date = {2018-01-01},
urldate = {2024-10-22},
journal = {Southern California Law Review},
volume = {92},
number = {6},
pages = {1529–1616},
abstract = {Algorithms are now used to make significant decisions about individuals, from credit determinations to hiring and firing. But they are largely unregulated under U.S. law. A quickly growing literature has split on how to address algorithmic decision-making, with individual rights and accountability to nonexpert stakeholders and to the public at the crux of the debate. In this Article, I make the case for why both individual rights and public- and stakeholder-facing accountability are not just goods in and of themselves but crucial components of effective governance. Only individual rights can fully address dignitary and justificatory concerns behind calls for regulating algorithmic decision-making. And without some form of public and stakeholder accountability, collaborative public-private approaches to systemic governance of algorithms will fail.
In this Article, I identify three categories of concern behind calls for regulating algorithmic decision-making: dignitary, justificatory, and instrumental. Dignitary concerns lead to proposals that we regulate algorithms to protect human dignity and autonomy; justificatory concerns caution that we must assess the legitimacy of algorithmic reasoning; and instrumental concerns lead to calls for regulation to prevent consequent problems such as error and bias. No one regulatory approach can effectively address all three. I therefore propose a two-pronged approach to algorithmic governance: a system of individual due process rights combined with systemic regulation achieved through collaborative governance (the use of private-public partnerships). Only through this binary approach can we effectively address all three concerns raised by algorithmic decision-making, or decision-making by Artificial Intelligence (“AI”).
The interplay between the two approaches will be complex. Sometimes the two systems will be complementary, and at other times, they will be in tension. The European Union’s (“EU’s”) General Data Protection Regulation (“GDPR”) is one such binary system. I explore the extensive collaborative governance aspects of the GDPR and how they interact with its individual rights regime. Understanding the GDPR in this way both illuminates its strengths and weaknesses and provides a model for how to construct a better governance regime for accountable algorithmic, or AI, decision-making. It shows, too, that in the absence of public and stakeholder accountability, individual rights can have a significant role to play in establishing the legitimacy of a collaborative regime.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zittrain, Jonathan L.; Faris, Robert; Noman, Helmi; Clark, Justin; Tilton, Casey; Morrison-Westphal, Ryan
The Shifting Landscape of Global Internet Censorship Miscellaneous
2017.
@misc{zittrain_shifting_2017,
title = {The Shifting Landscape of Global Internet Censorship},
author = {Jonathan L. Zittrain and Robert Faris and Helmi Noman and Justin Clark and Casey Tilton and Ryan Morrison-Westphal},
url = {https://papers.ssrn.com/abstract=2993485},
doi = {10.2139/ssrn.2993485},
year = {2017},
date = {2017-06-01},
urldate = {2024-10-22},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {A sharp increase in web encryption and a worldwide shift away from standalone websites in favor of social media and online publishing platforms has altered the practice of state-level Internet censorship and in some cases led to broader crackdowns, the Internet Monitor project at the Berkman Klein Center for Internet & Society at Harvard University finds. This study documents the practice of Internet censorship around the world through empirical testing in 45 countries of the availability of 2,046 of the world’s most-trafficked and influential websites, plus additional country-specific websites. The study finds evidence of filtering in 26 countries across four broad content themes: political, social, topics related to conflict and security, and Internet tools (a term that includes censorship circumvention tools as well as social media platforms). The majority of countries that censor content do so across all four themes, although the depth of the filtering varies.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Calo, Ryan
Artificial Intelligence Policy: A Primer and Roadmap Journal Article
In: U.C. Davis Law Review, vol. 51, no. 2, pp. 399–436, 2017.
@article{calo_artificial_2017,
title = {Artificial Intelligence Policy: A Primer and Roadmap},
author = {Ryan Calo},
url = {https://heinonline.org/HOL/P?h=hein.journals/davlr51&i=413},
year = {2017},
date = {2017-01-01},
urldate = {2024-10-22},
journal = {U.C. Davis Law Review},
volume = {51},
number = {2},
pages = {399–436},
abstract = {Talk of artificial intelligence is everywhere. People marvel at the capacity of machines to translate any language and master any game. Others condemn the use of secret algorithms to sentence criminal defendants or recoil at the prospect of machines gunning for blue, pink, and white-collar jobs. Some worry aloud that artificial intelligence will be humankind’s “final invention.”
This essay, prepared in connection with UC Davis Law Review's 50th anniversary symposium, explains why AI is suddenly on everyone's mind and provides a roadmap to the major policy questions AI raises. The essay is designed to help policymakers, investors, technologists, scholars, and students understand the contemporary policy environment around AI at least well enough to initiate their own exploration.
Topics covered include:
• Justice and equity
• Use of force
• Safety and certification
• Privacy (including data parity); and
• Taxation and displacement of labor
In addition to these topics, the essay will touch briefly on a selection of broader systemic questions:
• Institutional configuration and expertise
• Investment and procurement
• Removing hurdles to accountability; and
• Correcting mental models of AI},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Selbst, Andrew D.
Disparate Impact in Big Data Policing Journal Article
In: Georgia Law Review, vol. 52, no. 1, pp. 109–196, 2017.
@article{selbst_disparate_2017,
title = {Disparate Impact in Big Data Policing},
author = {Andrew D. Selbst},
url = {https://heinonline.org/HOL/P?h=hein.journals/geolr52&i=121},
year = {2017},
date = {2017-01-01},
urldate = {2024-10-22},
journal = {Georgia Law Review},
volume = {52},
number = {1},
pages = {109–196},
abstract = {Data-driven decision systems are taking over. No institution in society seems immune from the enthusiasm that automated decision-making generates, including—and perhaps especially—the police. Police departments are increasingly deploying data mining techniques to predict, prevent, and investigate crime. But all data mining systems have the potential for adverse impacts on vulnerable communities, and predictive policing is no different. Determining individuals’ threat levels by reference to commercial and social data can improperly link dark skin to higher threat levels or to greater suspicion of having committed a particular crime. Crime mapping based on historical data can lead to more arrests for nuisance crimes in neighborhoods primarily populated by people of color. These effects are an artifact of the technology itself, and will likely occur even assuming good faith on the part of the police departments using it. Meanwhile, predictive policing is sold in part as a “neutral” method to counteract unconscious biases when it is not simply sold to cash-strapped departments as a more cost-efficient way to do policing.
The degree to which predictive policing systems have these discriminatory results is unclear to the public and to the police themselves, largely because there is no incentive in place for a department focused solely on “crime control” to spend resources asking the question. This is a problem for which existing law does not provide a solution. Finding that neither the typical constitutional modes of police regulation nor a hypothetical anti-discrimination law would provide a solution, this Article turns toward a new regulatory proposal centered on “algorithmic impact statements.”
Modeled on the environmental impact statements of the National Environmental Policy Act, algorithmic impact statements would require police departments to evaluate the efficacy and potential discriminatory effects of all available choices for predictive policing technologies. The regulation would also allow the public to weigh in through a notice-and-comment process. Such a regulation would fill the knowledge gap that makes future policy discussions about the costs and benefits of predictive policing all but impossible. Being primarily procedural, it would not necessarily curtail a department determined to discriminate, but by forcing departments to consider the question and allowing society to understand the scope of the problem, it is a first step towards solving the problem and determining whether further intervention is required.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Metcalf, Jacob; Crawford, Kate
Where are human subjects in Big Data research? The emerging ethics divide Journal Article
In: Big Data & Society, vol. 3, no. 1, pp. 2053951716650211, 2016, ISSN: 2053-9517, (Publisher: SAGE Publications Ltd).
@article{metcalf_where_2016,
title = {Where are human subjects in Big Data research? The emerging ethics divide},
author = {Jacob Metcalf and Kate Crawford},
url = {https://doi.org/10.1177/2053951716650211},
doi = {10.1177/2053951716650211},
issn = {2053-9517},
year = {2016},
date = {2016-06-01},
urldate = {2024-10-22},
journal = {Big Data & Society},
volume = {3},
number = {1},
pages = {2053951716650211},
abstract = {There are growing discontinuities between the research practices of data science and established tools of research ethics regulation. Some of the core commitments of existing research ethics regulations, such as the distinction between research and practice, cannot be cleanly exported from biomedical research to data science research. Such discontinuities have led some data science practitioners and researchers to move toward rejecting ethics regulations outright. These shifts occur at the same time as a proposal for major revisions to the Common Rule—the primary regulation governing human-subjects research in the USA—is under consideration for the first time in decades. We contextualize these revisions in long-running complaints about regulation of social science research and argue data science should be understood as continuous with social sciences in this regard. The proposed regulations are more flexible and scalable to the methods of non-biomedical research, yet problematically largely exclude data science methods from human-subjects regulation, particularly uses of public datasets. The ethical frameworks for Big Data research are highly contested and in flux, and the potential harms of data science research are unpredictable. We examine several contentious cases of research harms in data science, including the 2014 Facebook emotional contagion study and the 2016 use of geographical data techniques to identify the pseudonymous artist Banksy. To address disputes about application of human-subjects research ethics in data science, critical data studies should offer a historically nuanced theory of “data subjectivity” responsive to the epistemic methods, harms and benefits of data science and commerce.},
note = {Publisher: SAGE Publications Ltd},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cohen, Julie E.
The Regulatory State in the Information Age (The Constitution of Information: From Gutenberg to Snowden) Journal Article
In: Theoretical Inquiries in Law, vol. 17, no. 2, pp. 369–414, 2016.
@article{cohen_regulatory_2016,
title = {The Regulatory State in the Information Age The Constitution of Information: From Gutenberg to Snowden},
author = {Julie E. Cohen},
url = {https://heinonline.org/HOL/P?h=hein.journals/thinla17&i=378},
year = {2016},
date = {2016-01-01},
urldate = {2024-10-22},
journal = {Theoretical Inquiries in Law},
volume = {17},
number = {2},
pages = {369–414},
abstract = {This Article examines the regulatory state through the lens of evolving political economy, arguing that a significant reconstruction is now underway. The ongoing shift from an industrial mode of development to an informational one has created existential challenges for regulatory models and constructs developed in the context of the industrial economy. Contemporary contests over the substance of regulatory mandates and the shape of regulatory institutions are most usefully understood as moves within a larger struggle to chart a new direction for the regulatory state in the era of informational capitalism. A regulatory state optimized for the information economy must develop rubrics for responding to three problems that have confounded existing regulatory regimes: (1) platform power — the power to link facially separate markets and/or to constrain participation in markets by using technical protocols; (2) infoglut — unmanageably voluminous, mediated information flows that create information overload; and (3) systemic threat — nascent, probabilistically-defined harm to be realized at some point in the future. Additionally, it must develop institutions capable of exercising effective oversight of information-era activities. The information-era regulatory models that have begun to emerge are procedurally informal, mediated by networks of professional and technical expertise that define relevant standards, and financialized. Such models, however, also have tended to be both opaque to external observation and highly prone to capture. New institutional forms that might ensure their legal and political accountability have been slow to develop.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fenwick, Mark; Kaal, Wulf A.; Vermeulen, Erik P. M.
Regulation Tomorrow: What Happens When Technology Is Faster than the Law Journal Article
In: American University Business Law Review, vol. 6, no. 3, pp. 561–594, 2016.
@article{fenwick_regulation_2016,
title = {Regulation Tomorrow: What Happens When Technology Is Faster than the Law},
author = {Mark Fenwick and Wulf A. Kaal and Erik P. M. Vermeulen},
url = {https://heinonline.org/HOL/P?h=hein.journals/aubulrw6&i=596},
year = {2016},
date = {2016-01-01},
urldate = {2024-10-22},
journal = {American University Business Law Review},
volume = {6},
number = {3},
pages = {561–594},
abstract = {In an age of constant, complex and disruptive technological innovation, knowing what, when, and how to structure regulatory interventions has become more difficult. Regulators find themselves in a situation where they believe they must opt for either reckless action (regulation without sufficient facts) or paralysis (doing nothing). Inevitably in such a case, caution tends to trump risk. But such caution merely functions to reinforce the status quo and makes it harder for new technologies to reach the market in a timely or efficient manner. The solution: lawmaking and regulatory design needs to become more proactive, dynamic, and responsive. So how can regulators actually achieve these goals? What can regulators do to promote innovation and offer better opportunities to people wanting to build a new business around a disruptive technology or simply enjoy the benefits of a disruptive new technology as a consumer?},
keywords = {},
pubstate = {published},
tppubtype = {article}
}