Bygrave, Lee A.
Machine Learning, Cognitive Sovereignty and Data Protection Rights with Respect to Automated Decisions Book Section
In: Stefanini, Elisa; Liguori, Laura; Ienca, Marcello; Pollicino, Oreste; Andorno, Roberto (Ed.): The Cambridge Handbook of Information Technology, Life Sciences and Human Rights, pp. 166–188, Cambridge University Press, Cambridge, 2022, ISBN: 978-1-108-47783-3.
@incollection{stefanini_machine_2022,
title = {Machine Learning, Cognitive Sovereignty and Data Protection Rights with Respect to Automated Decisions},
author = {Lee A. Bygrave},
editor = {Elisa Stefanini and Laura Liguori and Marcello Ienca and Oreste Pollicino and Roberto Andorno},
url = {https://www.cambridge.org/core/books/cambridge-handbook-of-information-technology-life-sciences-and-human-rights/machine-learning-cognitive-sovereignty-and-data-protection-rights-with-respect-to-automated-decisions/A1D153F5D7D4461EAF5B3B965E4B9612},
doi = {10.1017/9781108775038.016},
isbn = {978-1-108-47783-3},
year = {2022},
date = {2022-01-01},
urldate = {2024-10-22},
booktitle = {The Cambridge Handbook of Information Technology, Life Sciences and Human Rights},
pages = {166–188},
publisher = {Cambridge University Press},
address = {Cambridge},
series = {Cambridge Law Handbooks},
abstract = {Human behaviour is increasingly governed by automated decisional systems based on machine learning (ML) and ‘Big Data’. While these systems promise a range of benefits, they also throw up a congeries of challenges, not least for our ability as humans to understand their logic and ramifications. This chapter maps the basic mechanics of such systems, the concerns they raise, and the degree to which these concerns may be remedied by data protection law, particularly those provisions of the EU General Data Protection Regulation that specifically target automated decision-making. Drawing upon the work of Ulrich Beck, the chapter employs the notion of ‘cognitive sovereignty’ to provide an overarching conceptual framing of the subject matter. Cognitive sovereignty essentially denotes our moral and legal interest in being able to comprehend our environs and ourselves. Focus on this interest, the chapter argues, fills a blind spot in scholarship and policy discourse on ML-enhanced decisional systems, and is vital for grounding claims for greater explicability of machine processes.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Kuner, Christopher
Territorial Scope and Data Transfer Rules in the GDPR: Realising the EU’s Ambition of Borderless Data Protection Miscellaneous
2021.
@misc{kuner_territorial_2021,
title = {Territorial Scope and Data Transfer Rules in the GDPR: Realising the EU’s Ambition of Borderless Data Protection},
author = {Christopher Kuner},
url = {https://papers.ssrn.com/abstract=3827850},
doi = {10.2139/ssrn.3827850},
year = {2021},
date = {2021-04-01},
urldate = {2024-10-22},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {Legal protection of personal data that are transferred or processed outside the EU’s territorial boundaries has been strengthened in recent years, both in the GDPR and by the Court of Justice. The main mechanisms for guarding against data protection threats originating from outside the EU’s borders are rules on the territorial scope of EU data protection law (Article 3 GDPR), which allow its application to data processing by non-EU parties, and data transfer restrictions (Chapter V GDPR), which protect personal data that are transferred to third countries. The GDPR does not indicate how these two mechanisms interact, which has led to initiatives to disapply data transfer rules when data processed outside the EU are already subject to it. However, there has been little transparency about these initiatives or explanation of their rationale, despite their significance for the protection of EU data and their impact on the GDPR’s global reach. For the protection of EU data against external threats to be both legally sound and effective in practice, it is necessary to examine the nature and interaction of rules on territorial scope and data transfers, in order to determine how the EU’s vision of cross-border data protection can be realised.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Bal, Ravtosh; Gill, Indermit S.
Policy Approaches to Artificial Intelligence Based Technologies in China, European Union and the United States Miscellaneous
2020.
@misc{bal_policy_2020,
title = {Policy Approaches to Artificial Intelligence Based Technologies in China, European Union and the United States},
author = {Ravtosh Bal and Indermit S. Gill},
url = {https://papers.ssrn.com/abstract=3699640},
doi = {10.2139/ssrn.3699640},
year = {2020},
date = {2020-09-01},
urldate = {2024-10-22},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {This paper provides a summary survey of the policy approaches to artificial intelligence-based technologies in China, the European Union, and the United States. China has the most aggressive approach, launching major initiatives since 2015 such as ‘Made in China 2025’, the Internet Plus Plan, the New Generation Artificial Intelligence Development Plan, and the Artificial Intelligence Standardization White Paper. In 2018, the EU finalized both the European AI Strategy and ‘Made in Europe’ or the Coordinated Plan on the Development and Use of Artificial Intelligence. Despite a traditional reticence to adopt national strategies and perhaps pushed by growing concerns about China, the US Government announced the American AI Initiative and a National AI R&D Strategic Plan in 2019. The AI approaches in these three economies reflect their relative strengths—state control in China, citizen voice in Europe, and business practices in America. Unencumbered by privacy concerns, China’s strategy is geared to exploit the abundance of domestic data and to develop AI talent through central schemes and massive injections of money. The European Union’s regulations and spending priorities are guided by the objective of building citizen trust in AI-based technologies by safeguarding privacy and ameliorating disruptions in national labor markets. The mainstay of the US approach is to strengthen the linkages between business and AI-related research, and find ways to fund basic R&D. Despite efforts to indigenize AI innovations, all three economies face challenges. China’s AI strategy continues to rely disproportionately on just three tech giants: Baidu, Tencent and Alibaba, which have investments in more than a 100 AI-involved companies. Europe’s AI resources are unbalanced geographically—a quarter of Europe’s AI talent is in the UK and another quarter in Germany and France—and Brexit poses a serious risk. More than half of the AI talent in the US is foreign born, so immigration policies will inevitably be a central component of a national AI strategy.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Castets-Renard, Celine
Algorithmic Content Moderation on Social Media in EU Law: Illusion of Perfect Enforcement Journal Article
In: University of Illinois Journal of Law, Technology & Policy, vol. 2020, no. 2, pp. 283–324, 2020.
@article{castets-renard_algorithmic_2020,
title = {Algorithmic Content Moderation on Social Media in EU Law: Illusion of Perfect Enforcement},
author = {Celine Castets-Renard},
url = {https://heinonline.org/HOL/P?h=hein.journals/jltp2020&i=295},
year = {2020},
date = {2020-01-01},
urldate = {2024-10-22},
journal = {University of Illinois Journal of Law, Technology & Policy},
volume = {2020},
number = {2},
pages = {283–324},
abstract = {Intermediaries today do much more than passively distribute user content and facilitate user interactions. They now have near-total control of users’ online experience and content moderation. Even though these service providers benefit from the same liability exemption regime as technical intermediaries (E-Commerce Directive, Art. 14), they have unique characteristics that must be addressed. Consequently, many debates are ongoing to decide whether or not platforms should be more strictly regulated.
Platforms are required to remove illegal content in the event of notice and take-down procedures built on automated processing and are equally encouraged to take proactive and automated measures to detect and remove it. Algorithmic decision-making helps to scale down the massive task of content moderation. It would, therefore, seem that algorithmic decision-making would be the most effective way to provide perfect enforcement.
However, this is an illusion. A first difficulty occurs when deciding what, precisely, is illegal. Platforms manage the removal of illegal content automatically, which makes it particularly challenging to verify that the law is being respected. The automated decision-making systems are opaque and many scholars have shown that the main problem here is the over-removal chilling effect. Moreover, content removal is a task which, in many circumstances, should not be automated, as it depends on an appreciation of both the context and the rule of law.
To address this multi-faceted issue, I offer solutions to improve algorithmic accountability and to increase the transparency around automated decision-making. Improvements may be made specifically by providing platform users with new rights, which in turn will provide stronger guarantees for judicial and non-judicial redress in the event of over-removal.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ni Loideain, Nóra
A port in the data-sharing storm: the GDPR and the Internet of things Journal Article
In: Journal of Cyber Policy, vol. 4, no. 2, pp. 178–196, 2019, ISSN: 2373-8871, (Publisher: Routledge).
@article{loideain_port_2019,
title = {A port in the data-sharing storm: the GDPR and the Internet of things},
author = {Nóra Ni Loideain},
url = {https://doi.org/10.1080/23738871.2019.1635176},
doi = {10.1080/23738871.2019.1635176},
issn = {2373-8871},
year = {2019},
date = {2019-05-01},
urldate = {2024-10-22},
journal = {Journal of Cyber Policy},
volume = {4},
number = {2},
pages = {178–196},
abstract = {The onward march of the ‘Internet of Things’ (IoT) heralds an all-encompassing data-driven society where the collection, analysis, sharing, and retention of personal data by service providers, machines and objects will be pervasive and ubiquitous, thereby normalising sustained data gathering from any source possible. In other words, the full realisation of the IoT would best be described as a data-sharing storm where there are no controls or safeguards on what data is shared, who it is shared with, or for what purposes data is used or re-used. As a legal framework that stipulates key principles and safeguards that must be employed when processing of personal data takes place within its scope of application, the EU General Data Protection Regulation (GDPR) represents a port in the data-sharing storm put forward by this vision of the IoT. This article examines what role the recent major upgrade of EU data protection law, under the GDPR, may play in addressing the data protection implications and challenges posed by the IoT for data controllers and processors.},
note = {Publisher: Routledge},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wachter, Sandra; Mittelstadt, Brent
A Right to Reasonable Inferences: Re-Thinking Data Protection Law in the Age of Big Data and AI Journal Article
In: Columbia Business Law Review, vol. 2019, no. 2, pp. 494–620, 2019.
@article{wachter_right_2019,
title = {A Right to Reasonable Inferences: Re-Thinking Data Protection Law in the Age of Big Data and AI},
author = {Sandra Wachter and Brent Mittelstadt},
url = {https://heinonline.org/HOL/P?h=hein.journals/colb2019&i=506},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-22},
journal = {Columbia Business Law Review},
volume = {2019},
number = {2},
pages = {494–620},
abstract = {Big Data analytics and artificial intelligence (AI) draw non-intuitive and unverifiable inferences and predictions about the behaviors, preferences, and private lives of individuals. These inferences draw on highly diverse and feature-rich data of unpredictable value, and create new opportunities for discriminatory, biased, and invasive decision-making. Data protection law is meant to protect people’s privacy, identity, reputation, and autonomy, but is currently failing to protect data subjects from the novel risks of inferential analytics. The legal status of inferences is heavily disputed in legal scholarship, and marked by inconsistencies and contradictions within and between the views of the Article 29 Working Party and the European Court of Justice (ECJ).
This Article shows that individuals are granted little control and oversight over how their personal data is used to draw inferences about them. Compared to other types of personal data, inferences are effectively ‘economy class’ personal data in the General Data Protection Regulation (GDPR). Data subjects’ rights to know about (Art 13-15), rectify (Art 16), delete (Art 17), object to (Art 21), or port (Art 20) personal data are significantly curtailed for inferences. The GDPR also provides insufficient protection against sensitive inferences (Art 9) or remedies to challenge inferences or important decisions based on them (Art 22(3)).
This situation is not accidental. In standing jurisprudence the ECJ has consistently restricted the remit of data protection law to assessing the legitimacy of input personal data undergoing processing, and to rectify, block, or erase it. Critically, the ECJ has likewise made clear that data protection law is not intended to ensure the accuracy of decisions and decision-making processes involving personal data, or to make these processes fully transparent. Current policy proposals addressing privacy protection (the ePrivacy Regulation and the EU Digital Content Directive) and Europe’s new Copyright Directive and Trade Secrets Directive also fail to close the GDPR’s accountability gaps concerning inferences.
This Article argues that a new data protection right, the ‘right to reasonable inferences’, is needed to help close the accountability gap currently posed by ‘high risk inferences’, meaning inferences drawn from Big Data analytics that damage privacy or reputation, or have low verifiability in the sense of being predictive or opinion-based while being used in important decisions. This right would require ex-ante justification to be given by the data controller to establish whether an inference is reasonable. This disclosure would address (1) why certain data form a normatively acceptable basis from which to draw inferences; (2) why these inferences are relevant and normatively acceptable for the chosen processing purpose or type of automated decision; and (3) whether the data and methods used to draw the inferences are accurate and statistically reliable. The ex-ante justification is bolstered by an additional ex-post mechanism enabling unreasonable inferences to be challenged.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kuziemski, Maciej; Palka, Przemyslaw
AI governance post-GDPR: lessons learned and the road ahead Book
European University Institute, 2019.
@book{kuziemski_ai_2019,
title = {AI governance post-GDPR: lessons learned and the road ahead},
author = {Maciej Kuziemski and Przemyslaw Palka},
url = {https://cadmus.eui.eu/handle/1814/64146},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-22},
publisher = {European University Institute},
abstract = {Recent breakthroughs in the development of Artificial Intelligence (AI) have initiated heated debates regarding its governance. As of today, the success of AI relies on machine learning – the ability of algorithms to learn from, and find patterns in, large amounts of data. Consequently, governance of AI will in practice mean policies regarding both the design and access to algorithms, as well as collection and usage of information. Regarding the latter, the European Union (EU) has put in place a comprehensive normative framework: the General Data Protection Regulation (GDPR), applicable since 25 May 2018. Based on the discussion that took place during the School of Transnational Governance’s High-Level Policy Dialogue on 26 June 2018, we present three actionable recommendations for global and local policymakers coming to grasp with the questions of AI Governance.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Giuffrida, Iria
Liability for AI Decision-Making: Some Legal and Ethical Considerations Journal Article
In: Fordham Law Review, vol. 88, no. 2, pp. 439–456, 2019.
@article{giuffrida_liability_2019,
title = {Liability for AI Decision-Making: Some Legal and Ethical Considerations},
author = {Iria Giuffrida},
url = {https://heinonline.org/HOL/P?h=hein.journals/flr88&i=455},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-22},
journal = {Fordham Law Review},
volume = {88},
number = {2},
pages = {439–456},
abstract = {Benjamin Franklin wrote that nothing is certain “except death and taxes.” A cynical former litigator, like the author, might add to those the certainty of litigation as new technology creates an increasing number of real challenges. With breakthroughs in artificial intelligence (AI) and related technologies, their uses are being implemented in government, finance, health care, law, environmental protection, and education.
AI plays varied functions in these applications. AI systems can be descriptive as they tell you what happened; diagnostic as they tell you why something happened; predictive as they forecast what will (statistically) happen; and prescriptive in being capable of performing actual decision-making and implementation.
The creation and commercialization of these systems raise the question of how liability risks will play out in real life. However, as technical advancements have outpaced legal actions, it is unclear how the law will treat AI systems. This Article briefly addresses the legal ramifications and liability risks associated with reliance on—or delegation to—AI systems, and it sketches a framework suggesting how we can address the question of whether AI merits a new approach to deal with the liability challenges it raises when humans remain “in” or “on” the loop. This Article also suggests that questions of how we, as a society, deal with those challenges have to be connected to the broader ethical questions that AI evokes, such as whether we really want to create a fully autonomous system that we cannot control and how we could protect against “artificial stupidity.”},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Siapka, Anastasia
The Ethical and Legal Challenges of Artificial Intelligence: The EU response to biased and discriminatory AI Miscellaneous
2018.
@misc{siapka_ethical_2018,
title = {The Ethical and Legal Challenges of Artificial Intelligence: The EU response to biased and discriminatory AI},
author = {Anastasia Siapka},
url = {https://papers.ssrn.com/abstract=3408773},
doi = {10.2139/ssrn.3408773},
year = {2018},
date = {2018-12-01},
urldate = {2024-10-22},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {The proliferation of Artificial Intelligence (AI) in decision-making contexts is hailed as a silver bullet, pledging to replace human subjectivity with objective, infallible decisions. Paradoxically, considerable journalistic reporting has recently commanded attention to biased and discriminatory attitudes displayed by AI systems on both sides of the Atlantic. Notwithstanding the permeation of automated decision-making in critical settings, such as criminal justice, job recruitment, and border control, wherein rights and freedoms of individuals and groups are likewise imperilled, there is often no way for human agents to untangle how AI systems reach such unacceptable decisions. The conspicuous bias problem of AI alongside its operation as an inexplicable ‘black box’ render the exploration of this phenomenon pressing, primarily in the less examined EU policy arena. This dissertation pursues an interdisciplinary research methodology to examine which are the main ethical and legal challenges that Narrow AI, especially in its data-driven Machine Learning (ML) form, poses in relation to bias and discrimination across the EU. Chapter 1 equips readers with pertinent background information regarding AI and its interdependent ML and Big Data technologies. In an accessible manner, it takes heed of the definitions and types of AI adopted by EU instruments along with the milestones in its historical progression and its current stage of development. Chapter 2 conducts a philosophical analysis to argue against the putative ethical neutrality of AI. Ethical concerns of epistemological nature reveal that biases traverse AI systems through the selection of objectives, training data, the reliance on correlations, and the epistemic inequality between lay individuals and AI developers in combination with that between human agents and ‘black box’ machines in general. Touching upon normative ethical concerns, AI systems entail effects which, according to egalitarianism, oppose normative ideals of fairness and equality. In more Kafkaesque scenarios, individuals and corporations may use technical particularities of AI to mask their discriminatory intent. In Chapter 3, a doctrinal legal methodology is applied to reveal the tensions of these challenging instantiations of AI in light of soft and hard EU law instruments. In consideration of its data-driven character, biased and discriminatory AI decisions fall within the applicability scope of the newly enforced General Data Protection Regulation (GDPR). In particular, the data processing principles of Article 5, the Data Protection Impact Assessments (DPIA) of Article 35, the prohibition of automated decision-making and the speculative right to explanation of Article 22, the principles of lawfulness, fairness, and transparency of Article 5 (1) a), the suggested implementation of auditing, and the enhanced enforcement authorities receive scrutiny. The dissertation concludes that a principles-based approach and the provision of anticipatory impact assessments are regulatory strengths of the GDPR. However, the EU should discourage the deployment of AI in crucial decision-making contexts and explore ways to fill related legal gaps. Overall, Trustworthy AI is proposed as an ethical and legal paragon in the face of biased and discriminatory AI.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Besse, Philippe; Castets-Renard, Celine; Garivier, Aurelien; Loubes, Jean-Michel
Can everyday AI be ethical. Fairness of Machine Learning Algorithms Miscellaneous
2018, (arXiv:1810.01729).
@misc{besse_can_2018,
title = {Can everyday AI be ethical. Fairness of Machine Learning Algorithms},
author = {Philippe Besse and Celine Castets-Renard and Aurelien Garivier and Jean-Michel Loubes},
url = {http://arxiv.org/abs/1810.01729},
doi = {10.48550/arXiv.1810.01729},
year = {2018},
date = {2018-10-01},
urldate = {2024-10-22},
publisher = {arXiv},
abstract = {Combining big data and machine learning algorithms, the power of automatic decision tools induces as much hope as fear. Many recently enacted European legislation (GDPR) and French laws attempt to regulate the use of these tools. Leaving aside the well-identified problems of data confidentiality and impediments to competition, we focus on the risks of discrimination, the problems of transparency and the quality of algorithmic decisions. The detailed perspective of the legal texts, faced with the complexity and opacity of the learning algorithms, reveals the need for important technological disruptions for the detection or reduction of the discrimination risk, and for addressing the right to obtain an explanation of the automatic decision. Since trust of the developers and above all of the users (citizens, litigants, customers) is essential, algorithms exploiting personal data must be deployed in a strict ethical framework. In conclusion, to answer this need, we list some ways of controls to be developed: institutional control, ethical charter, external audit attached to the issue of a label.},
note = {arXiv:1810.01729},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Cath, Corinne; Wachter, Sandra; Mittelstadt, Brent; Taddeo, Mariarosaria; Floridi, Luciano
Artificial Intelligence and the ‘Good Society’: the US, EU, and UK approach Journal Article
In: Science and Engineering Ethics, vol. 24, no. 2, pp. 505–528, 2018, ISSN: 1471-5546.
@article{cath_artificial_2018,
title = {Artificial Intelligence and the ‘Good Society’: the US, EU, and UK approach},
author = {Corinne Cath and Sandra Wachter and Brent Mittelstadt and Mariarosaria Taddeo and Luciano Floridi},
url = {https://doi.org/10.1007/s11948-017-9901-7},
doi = {10.1007/s11948-017-9901-7},
issn = {1471-5546},
year = {2018},
date = {2018-04-01},
urldate = {2024-10-22},
journal = {Science and Engineering Ethics},
volume = {24},
number = {2},
pages = {505–528},
abstract = {In October 2016, the White House, the European Parliament, and the UK House of Commons each issued a report outlining their visions on how to prepare society for the widespread use of artificial intelligence (AI). In this article, we provide a comparative assessment of these three reports in order to facilitate the design of policies favourable to the development of a ‘good AI society’. To do so, we examine how each report addresses the following three topics: (a) the development of a ‘good AI society’; (b) the role and responsibility of the government, the private sector, and the research community (including academia) in pursuing such a development; and (c) where the recommendations to support such a development may be in need of improvement. Our analysis concludes that the reports address adequately various ethical, social, and economic topics, but come short of providing an overarching political vision and long-term strategy for the development of a ‘good AI society’. In order to contribute to fill this gap, in the conclusion we suggest a two-pronged approach.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jabłonowska, Agnieszka; Kuziemski, Maciej; Nowak, Anna Maria; Micklitz, Hans-Wolfgang; Palka, Przemyslaw; Sartor, Giovanni
Consumer law and artificial intelligence: challenges to the EU consumer law and policy stemming from the business' use of artificial intelligence: final report of the ARTSY project Technical Report
2018, (ISSN: 1725-6739).
@techreport{jablonowska_consumer_2018,
title = {Consumer law and artificial intelligence : challenges to the EU consumer law and policy stemming from the business' use of artificial intelligence : final report of the ARTSY project},
author = {Agnieszka Jabłonowska and Maciej Kuziemski and Anna Maria Nowak and Hans-Wolfgang Micklitz and Przemyslaw Palka and Giovanni Sartor},
url = {https://cadmus.eui.eu/handle/1814/57484},
year = {2018},
date = {2018-01-01},
urldate = {2024-10-22},
abstract = {Potential regulation of use of artificial intelligence by business should minimize the risks for consumers and the society without impeding the possible benefits. To do so, we argue, the legal reaction should be grounded in an empirical analysis and proceed case-by-case, bottom-up, as a series of responses to concrete research questions. The ambition of this report has been to commence and facilitate that process. We extensively document and evaluate the market practice of the corporate use of AI, map the scholarly debates about (consumer) law and artificial intelligence, and present a list of twenty five research questions which, in our opinion, require attention of regulators and academia.
The report is divided into four sections. The first explains our understanding of the concepts of “artificial intelligence” (a set of socio-technological practices enabled by machine learning and big data) and “consumer law” (various legal instruments concretizing the principles of the weaker party protection, non-discrimination, regulated autonomy and consumer privacy). The second section documents the ways in which the business uses artificial intelligence in seven sectors of the economy: finance and insurance, information services, energy and “smart solutions”, retail, autonomous vehicles, healthcare and legal services. For each analyzed sector we study the gains for the businesses stemming from the deployment of AI, the potential gains, but also challenges for consumers, as well as third party effects. In the third section, we repeat the analysis through the lens of four general “uses” of AI by businesses in various sectors: knowledge generation, automated decision making, advertising and other commercial practices and personal digital assistants. Finally, in the fourth section, we present the questions which we believe should be addressed in the next stage of the research. We cluster them into: normative questions about regulatory goals, technological and governance questions about regulatory means, and theoretical questions about concepts and preconceptions.},
note = {ISSN: 1725-6739},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
van Genderen, R. H.
Privacy and Data Protection in the Age of Pervasive Technologies in AI and Robotics Journal Article
In: European Data Protection Law Review, vol. 2017, no. 3, pp. 338–352, 2017, ISSN: 2364-2831.
@article{rh_van_genderen_privacy_2017,
title = {Privacy and Data Protection in the Age of Pervasive Technologies in AI and Robotics},
author = {R. H. van Genderen},
doi = {10.21552/edpl/2017/3/8},
issn = {2364-2831},
year = {2017},
date = {2017-08-01},
journal = {European Data Protection Law Review},
volume = {2017},
number = {3},
pages = {338–352},
abstract = {Robots have been a part of the popular imagination since antiquity. And yet the idea of a robot — a being that exists somehow in the twilight between machine and person — continues to fascinate. Privacy, data protection and physical integrity will be structurally influenced by the pervasive integration of Artificial Intelligence (AI) and robotics. Can we find ways to control this development or do we just have to live with the disintegration of privacy as we know it? Will the new rules by the GDPR on data protection suffice to protect our personal data or are these processes in the AI era impossible to regulate? How vulnerable is AI concerning the processing of our personal data? Do we still care about our privacy, if we increasingly share our personal information with other parties? What should our itinerary for the future be when attempting to create an acceptable solution? In this article these questions are discussed but the answers lie in actions for the future.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wachter, Sandra; Mittelstadt, Brent; Floridi, Luciano
Why a Right to Explanation of Automated Decision-Making Does Not Exist in the General Data Protection Regulation Journal Article
In: International Data Privacy Law, vol. 7, no. 2, pp. 76–99, 2017, ISSN: 2044-3994.
@article{wachter_why_2017,
title = {Why a Right to Explanation of Automated Decision-Making Does Not Exist in the General Data Protection Regulation},
author = {Sandra Wachter and Brent Mittelstadt and Luciano Floridi},
url = {https://doi.org/10.1093/idpl/ipx005},
doi = {10.1093/idpl/ipx005},
issn = {2044-3994},
year = {2017},
date = {2017-05-01},
urldate = {2024-10-22},
journal = {International Data Privacy Law},
volume = {7},
number = {2},
pages = {76–99},
abstract = {Key Points: Since approval of the European Union General Data Protection Regulation (GDPR) in 2016, it has been widely and repeatedly claimed that a ‘right to explanation’ of all decisions made by automated or artificially intelligent algorithmic systems will be legally mandated by the GDPR once it is in force, in 2018. However, there are several reasons to doubt both the legal existence and the feasibility of such a right. In contrast to the right to explanation of specific automated decisions claimed elsewhere, the GDPR only mandates that data subjects receive meaningful, but properly limited, information (Articles 13–15) about the logic involved, as well as the significance and the envisaged consequences of automated decision-making systems, what we term a ‘right to be informed’. The ambiguity and limited scope of the ‘right not to be subject to automated decision-making’ contained in Article 22 (from which the alleged ‘right to explanation’ stems) raises questions over the protection actually afforded to data subjects. These problems show that the GDPR lacks precise language as well as explicit and well-defined rights and safeguards against automated decision-making, and therefore runs the risk of being toothless. We propose a number of legislative steps that, if implemented, may improve the transparency and accountability of automated decision-making when the GDPR comes into force in 2018.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lynskey, Orla
Regulating 'Platform Power' Miscellaneous
2017.
@misc{lynskey_regulating_2017,
title = {Regulating 'Platform Power'},
author = {Orla Lynskey},
url = {https://papers.ssrn.com/abstract=2921021},
doi = {10.2139/ssrn.2921021},
year = {2017},
date = {2017-02-01},
urldate = {2024-10-22},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {Increasing regulatory and doctrinal attention has recently focused on the problem of ‘platform power’. Yet calls for regulation of online platforms fail to identify the problems such regulation would target, and as a result appear to lack merit. In this paper, two claims are advanced. First, that the concept of ‘platform power’ is both an under and over-inclusive regulatory target and, as such, should be replaced by the broader concept of a ‘digital gatekeeper’. Second, that existing legal mechanisms do not adequately reflect the power over information flows and individual behaviour that gatekeepers can exercise. In particular, this gatekeeper power can have implications for individual rights that competition law and economic regulation are not designed to capture. Moreover, the technological design, and complexity, of digital gatekeepers renders their operations impervious to scrutiny by individual users, thereby exacerbating these potential implications.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Bayern, Shawn; Burri, Thomas; Grant, Thomas D.; Hausermann, Daniel M.; Moslein, Florian; Williams, Richard
Company Law and Autonomous Systems: A Blueprint for Lawyers, Entrepreneurs, and Regulators Journal Article
In: Hastings Science and Technology Law Journal, vol. 9, no. 2, pp. 135–162, 2017.
@article{bayern_company_2017,
title = {Company Law and Autonomous Systems: A Blueprint for Lawyers, Entrepreneurs, and Regulators},
author = {Shawn Bayern and Thomas Burri and Thomas D. Grant and Daniel M. Hausermann and Florian Moslein and Richard Williams},
url = {https://heinonline.org/HOL/P?h=hein.journals/hascietlj9&i=169},
year = {2017},
date = {2017-01-01},
urldate = {2024-10-22},
journal = {Hastings Science and Technology Law Journal},
volume = {9},
number = {2},
pages = {135–162},
abstract = {In discussions of the regulation of autonomous systems, private law — specifically, company law — has been neglected as a potential legal and regulatory interface. As one of us has suggested previously, there are several possibilities for the creation of company structures that might provide functional and adaptive legal “housing” for advanced software, various types of artificial intelligence, and other programmatic systems and organizations — phenomena that we refer to here collectively as autonomous systems for ease of reference. In particular, this prior work introduces the notion that an operating agreement or private entity constitution (such as a corporation’s charter or a partnership’s operating agreement) can adopt, as the acts of a legal entity, the state or actions of arbitrary physical systems. We call this the algorithm-agreement equivalence principle. Given this principle and the present capacities of existing forms of legal entities, companies of various kinds can serve as a mechanism through which autonomous systems might engage with the legal system.
This paper considers the implications of this possibility from a comparative and international perspective. Our goal is to suggest how, under U.S., German, Swiss and U.K. law, company law might furnish the functional and adaptive legal “housing” for an autonomous system — and, in turn, we aim to inform systems designers, regulators, and others who are interested in, encouraged by, or alarmed at the possibility that an autonomous system may “inhabit” a company and thereby gain some of the incidents of legal personality. We do not aim here to be normative. Instead, the paper lays out a template suggesting how existing laws might provide a potentially unexpected regulatory framework for autonomous systems, and to explore some legal consequences of this possibility. We do suggest that these considerations might spur others to consider the relevant provisions of their own national laws with a view to locating similar legal “spaces” that autonomous systems could “inhabit.”},
keywords = {},
pubstate = {published},
tppubtype = {article}
}