1.
De Stefano, Valerio
2020.
@misc{de_stefano_negotiating_2020,
title = {Negotiating Governance and Control Over {AI}-Bosses at Work – Position Paper Presented at {OECD} Network of Experts on Artificial Intelligence ({ONE AI})},
author = {De Stefano, Valerio},
url = {https://papers.ssrn.com/abstract=3542831},
doi = {10.2139/ssrn.3542831},
year = {2020},
date = {2020-02-01},
urldate = {2024-10-22},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {This short position paper was presented at the Meeting of the OECD Network of Experts on Artificial Intelligence (ONE AI) held in Paris on 26-27 February 2020. It argues that public and collective governance, including through collective bargaining, is urgently needed to prevent the most invasive uses of Artificial Intelligence and algorithmic management and surveillance over workers. Tools such as GPS-based wearable devices that monitor workers' position at the workplace and collect data on particularly private information, including workers' heartbeats and emotional status call for immediate action from regulators and social partners. Surveillance practices should be limited to indispensable cases and transparency of data collection and processes should be safeguarded, also to rule out the many possible discriminatory consequences of management-by-algorithm.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
This short position paper was presented at the Meeting of the OECD Network of Experts on Artificial Intelligence (ONE AI) held in Paris on 26-27 February 2020. It argues that public and collective governance, including through collective bargaining, is urgently needed to prevent the most invasive uses of Artificial Intelligence and algorithmic management and surveillance over workers. Tools such as GPS-based wearable devices that monitor workers' position at the workplace and collect data on particularly private information, including workers' heartbeats and emotional status call for immediate action from regulators and social partners. Surveillance practices should be limited to indispensable cases and transparency of data collection and processes should be safeguarded, also to rule out the many possible discriminatory consequences of management-by-algorithm.
2.
Selinger, Evan; Hartzog, Woodrow
The Inconsentability of Facial Surveillance Consentability Symposium Journal Article
In: Loyola Law Review, vol. 66, no. 1, pp. 33–54, 2020.
@article{selinger_inconsentability_2020,
title = {The Inconsentability of Facial Surveillance},
author = {Selinger, Evan and Hartzog, Woodrow},
url = {https://heinonline.org/HOL/P?h=hein.journals/loyolr66&i=43},
year = {2020},
date = {2020-01-01},
urldate = {2024-10-22},
journal = {Loyola Law Review},
volume = {66},
number = {1},
pages = {33--54},
note = {Consentability Symposium},
abstract = {Governments and companies often use consent to justify the use of facial recognition technologies for surveillance. Many proposals for regulating facial recognition technology incorporate consent rules as a way to protect those faces that are being tagged and tracked. But consent is a broken regulatory mechanism for facial surveillance. The individual risks of facial surveillance are impossibly opaque, and our collective autonomy and obscurity interests aren’t captured or served by individual decisions.
In this article, we argue that facial recognition technologies have a massive and likely fatal consent problem. We reconstruct some of Nancy Kim’s fundamental claims in Consentability: Consent and Its Limits, emphasizing how her consentability framework grants foundational priority to individual and social autonomy, integrates empirical insights into cognitive limitations that significantly impact the quality of human decision-making when granting consent, and identifies social, psychological, and legal impediments that allow the pace and negative consequences of innovation to outstrip the protections of legal regulation.
We also expand upon Kim’s analysis by arguing that valid consent cannot be given for face surveillance. Even if valid individual consent to face surveillance was possible, permission for such surveillance is in irresolvable conflict with our collective autonomy and obscurity interests. Additionally, there is good reason to be skeptical of consent as the justification for any use of facial recognition technology, including facial characterization, verification, and identification.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Governments and companies often use consent to justify the use of facial recognition technologies for surveillance. Many proposals for regulating facial recognition technology incorporate consent rules as a way to protect those faces that are being tagged and tracked. But consent is a broken regulatory mechanism for facial surveillance. The individual risks of facial surveillance are impossibly opaque, and our collective autonomy and obscurity interests aren’t captured or served by individual decisions.
In this article, we argue that facial recognition technologies have a massive and likely fatal consent problem. We reconstruct some of Nancy Kim’s fundamental claims in Consentability: Consent and Its Limits, emphasizing how her consentability framework grants foundational priority to individual and social autonomy, integrates empirical insights into cognitive limitations that significantly impact the quality of human decision-making when granting consent, and identifies social, psychological, and legal impediments that allow the pace and negative consequences of innovation to outstrip the protections of legal regulation.
We also expand upon Kim’s analysis by arguing that valid consent cannot be given for face surveillance. Even if valid individual consent to face surveillance was possible, permission for such surveillance is in irresolvable conflict with our collective autonomy and obscurity interests. Additionally, there is good reason to be skeptical of consent as the justification for any use of facial recognition technology, including facial characterization, verification, and identification.
In this article, we argue that facial recognition technologies have a massive and likely fatal consent problem. We reconstruct some of Nancy Kim’s fundamental claims in Consentability: Consent and Its Limits, emphasizing how her consentability framework grants foundational priority to individual and social autonomy, integrates empirical insights into cognitive limitations that significantly impact the quality of human decision-making when granting consent, and identifies social, psychological, and legal impediments that allow the pace and negative consequences of innovation to outstrip the protections of legal regulation.
We also expand upon Kim’s analysis by arguing that valid consent cannot be given for face surveillance. Even if valid individual consent to face surveillance was possible, permission for such surveillance is in irresolvable conflict with our collective autonomy and obscurity interests. Additionally, there is good reason to be skeptical of consent as the justification for any use of facial recognition technology, including facial characterization, verification, and identification.
3.
Manheim, Karl; Kaplan, Lyric
Artificial Intelligence: Risks to Privacy and Democracy Journal Article
In: Yale Journal of Law and Technology, vol. 21, pp. 106–188, 2019.
@article{manheim_artificial_2019,
title = {Artificial Intelligence: Risks to Privacy and Democracy},
author = {Manheim, Karl and Kaplan, Lyric},
url = {https://heinonline.org/HOL/P?h=hein.journals/yjolt21&i=106},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-22},
journal = {Yale Journal of Law and Technology},
volume = {21},
pages = {106--188},
abstract = {A “Democracy Index” is published annually by the Economist. For 2017, it reported that half of the world’s countries scored lower than the previous year. This included the United States, which was demoted from “full democracy” to “flawed democracy.” The principal factor was “erosion of confidence in government and public institutions.” Interference by Russia and voter manipulation by Cambridge Analytica in the 2016 presidential election played a large part in that public disaffection.
Threats of these kinds will continue, fueled by growing deployment of artificial intelligence (AI) tools to manipulate the preconditions and levers of democracy. Equally destructive is AI’s threat to decisional and informational privacy. AI is the engine behind Big Data Analytics and the Internet of Things. While conferring some consumer benefit, their principal function at present is to capture personal information, create detailed behavioral profiles and sell us goods and agendas. Privacy, anonymity and autonomy are the main casualties of AI’s ability to manipulate choices in economic and political decisions.
The way forward requires greater attention to these risks at the national level, and attendant regulation. In its absence, technology giants, all of whom are heavily investing in and profiting from AI, will dominate not only the public discourse, but also the future of our core values and democratic institutions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
A “Democracy Index” is published annually by the Economist. For 2017, it reported that half of the world’s countries scored lower than the previous year. This included the United States, which was demoted from “full democracy” to “flawed democracy.” The principal factor was “erosion of confidence in government and public institutions.” Interference by Russia and voter manipulation by Cambridge Analytica in the 2016 presidential election played a large part in that public disaffection.
Threats of these kinds will continue, fueled by growing deployment of artificial intelligence (AI) tools to manipulate the preconditions and levers of democracy. Equally destructive is AI’s threat to decisional and informational privacy. AI is the engine behind Big Data Analytics and the Internet of Things. While conferring some consumer benefit, their principal function at present is to capture personal information, create detailed behavioral profiles and sell us goods and agendas. Privacy, anonymity and autonomy are the main casualties of AI’s ability to manipulate choices in economic and political decisions.
The way forward requires greater attention to these risks at the national level, and attendant regulation. In its absence, technology giants, all of whom are heavily investing in and profiting from AI, will dominate not only the public discourse, but also the future of our core values and democratic institutions.
Threats of these kinds will continue, fueled by growing deployment of artificial intelligence (AI) tools to manipulate the preconditions and levers of democracy. Equally destructive is AI’s threat to decisional and informational privacy. AI is the engine behind Big Data Analytics and the Internet of Things. While conferring some consumer benefit, their principal function at present is to capture personal information, create detailed behavioral profiles and sell us goods and agendas. Privacy, anonymity and autonomy are the main casualties of AI’s ability to manipulate choices in economic and political decisions.
The way forward requires greater attention to these risks at the national level, and attendant regulation. In its absence, technology giants, all of whom are heavily investing in and profiting from AI, will dominate not only the public discourse, but also the future of our core values and democratic institutions.
4.
Aloisi, Antonio; Gramano, Elena
Artificial Intelligence Is Watching You at Work: Digital Surveillance, Employee Monitoring, and Regulatory Issues in the EU Context Journal Article
In: Comparative Labor Law & Policy Journal, vol. 41, no. 1, pp. 95–122, 2019.
@article{aloisi_artificial_2019,
title = {Artificial Intelligence Is Watching You at Work: Digital Surveillance, Employee Monitoring, and Regulatory Issues in the {EU} Context},
author = {Aloisi, Antonio and Gramano, Elena},
url = {https://heinonline.org/HOL/P?h=hein.journals/cllpj41&i=107},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-22},
journal = {Comparative Labor Law \& Policy Journal},
volume = {41},
number = {1},
pages = {95--122},
note = {Symposium: Automation, Artificial Intelligence, \& Labor Law},
abstract = {By affecting activities in both traditional and modern industries, countless invasive devices constitute a burgeoning terrain for new forms of monitoring assisted by artificial intelligence and algorithms; these range from badges to tablets, from wearables to exoskeletons, from collaborative software to virtual personal assistant, from computer networks to face recognition systems. From a legal perspective, these tools constantly collect, produce, share and combine data that may be used by the employer for all the many different reasons, thus leading to a “genetic variation” of the organizational, monitoring and disciplinary prerogative, considered as the core of the employment contract.
When it comes to recruiting, managing, and vetting the workforce, AI applications can be considered as an effective combination of big data analytics and algorithmic governance. Only recently, have international, European and domestic institutions started considering how to update existing regulation in order to face these complex and far-reaching challenges. This article assesses the effects of AI application on the employment relationship, with a view to understanding how social and legal institutions act, react or adapt to a potential experience of unprecedented digital surveillance in the workplace, entrenching command-and-control relationships between management and workers.
The paper is organized as follows. After describing the new arenas of workplace surveillance, we provide a comprehensive conceptualization of AI application. Section 2 explores the latest generation of digital devices, understood in their broadest definition encompassing both physical supports as well as intangible tools. In many cases, AI prevents accidents caused by human error or reduce the hazard (or even the burden) of routine and menial activities. On the other hand, these software and devices create an effective, invasive and elusive system of watchfulness increasing conformity and promoting docility.
Section 3 describes how the EU has set the tone globally in the regulation of privacy and data protection. In particular, we scrutinize the new GDPR thoroughly. One concern on its effectiveness revolves around the limits on the automated decision-making processes (Art. 22). Section 4 describes how some European civil law systems deal with the regulation of surveillance of workers. The cases of France, Germany and Italy are analyzed by stressing the common elements and loopholes. Section 5 assesses some conclusions by verifying whether the current regulations are suitable to cope with the adoption of AI at work.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
By affecting activities in both traditional and modern industries, countless invasive devices constitute a burgeoning terrain for new forms of monitoring assisted by artificial intelligence and algorithms; these range from badges to tablets, from wearables to exoskeletons, from collaborative software to virtual personal assistant, from computer networks to face recognition systems. From a legal perspective, these tools constantly collect, produce, share and combine data that may be used by the employer for all the many different reasons, thus leading to a “genetic variation” of the organizational, monitoring and disciplinary prerogative, considered as the core of the employment contract.
When it comes to recruiting, managing, and vetting the workforce, AI applications can be considered as an effective combination of big data analytics and algorithmic governance. Only recently, have international, European and domestic institutions started considering how to update existing regulation in order to face these complex and far-reaching challenges. This article assesses the effects of AI application on the employment relationship, with a view to understanding how social and legal institutions act, react or adapt to a potential experience of unprecedented digital surveillance in the workplace, entrenching command-and-control relationships between management and workers.
The paper is organized as follows. After describing the new arenas of workplace surveillance, we provide a comprehensive conceptualization of AI application. Section 2 explores the latest generation of digital devices, understood in their broadest definition encompassing both physical supports as well as intangible tools. In many cases, AI prevents accidents caused by human error or reduce the hazard (or even the burden) of routine and menial activities. On the other hand, these software and devices create an effective, invasive and elusive system of watchfulness increasing conformity and promoting docility.
Section 3 describes how the EU has set the tone globally in the regulation of privacy and data protection. In particular, we scrutinize the new GDPR thoroughly. One concern on its effectiveness revolves around the limits on the automated decision-making processes (Art. 22). Section 4 describes how some European civil law systems deal with the regulation of surveillance of workers. The cases of France, Germany and Italy are analyzed by stressing the common elements and loopholes. Section 5 assesses some conclusions by verifying whether the current regulations are suitable to cope with the adoption of AI at work.
When it comes to recruiting, managing, and vetting the workforce, AI applications can be considered as an effective combination of big data analytics and algorithmic governance. Only recently, have international, European and domestic institutions started considering how to update existing regulation in order to face these complex and far-reaching challenges. This article assesses the effects of AI application on the employment relationship, with a view to understanding how social and legal institutions act, react or adapt to a potential experience of unprecedented digital surveillance in the workplace, entrenching command-and-control relationships between management and workers.
The paper is organized as follows. After describing the new arenas of workplace surveillance, we provide a comprehensive conceptualization of AI application. Section 2 explores the latest generation of digital devices, understood in their broadest definition encompassing both physical supports as well as intangible tools. In many cases, AI prevents accidents caused by human error or reduce the hazard (or even the burden) of routine and menial activities. On the other hand, these software and devices create an effective, invasive and elusive system of watchfulness increasing conformity and promoting docility.
Section 3 describes how the EU has set the tone globally in the regulation of privacy and data protection. In particular, we scrutinize the new GDPR thoroughly. One concern on its effectiveness revolves around the limits on the automated decision-making processes (Art. 22). Section 4 describes how some European civil law systems deal with the regulation of surveillance of workers. The cases of France, Germany and Italy are analyzed by stressing the common elements and loopholes. Section 5 assesses some conclusions by verifying whether the current regulations are suitable to cope with the adoption of AI at work.
5.
Ajunwa, Ifeoma; Crawford, Kate; Schultz, Jason
Limitless Worker Surveillance Journal Article
In: California Law Review, vol. 105, no. 3, pp. 735–776, 2017.
@article{ajunwa_limitless_2017,
title = {Limitless Worker Surveillance},
author = {Ajunwa, Ifeoma and Crawford, Kate and Schultz, Jason},
url = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2746211},
year = {2017},
date = {2017-01-01},
urldate = {2024-10-08},
journal = {California Law Review},
volume = {105},
number = {3},
pages = {735--776},
abstract = {From the Pinkerton private detectives of the 1850s, to the closed-circuit cameras and email monitoring of the 1990s, to contemporary apps that quantify the productivity of workers, American employers have increasingly sought to track the activities of their employees. Along with economic and technological limits, the law has always been presumed as a constraint on these surveillance activities. Recently, technological advancements in several fields – data analytics, communications capture, mobile device design, DNA testing, and biometrics – have dramatically expanded capacities for worker surveillance both on and off the job. At the same time, the cost of many forms of surveillance has dropped significantly, while new technologies make the surveillance of workers even more convenient and accessible. This leaves the law as the last meaningful avenue to delineate boundaries for worker surveillance.
In this Article, we examine the effectiveness of the law as a check on worker surveillance, given recent technological innovations. In particular, we focus on two popular trends in worker tracking – productivity apps and worker wellness programs – to argue that current legal constraints are insufficient and may leave American workers at the mercy of 24/7 employer monitoring. We then propose a new comprehensive framework for worker privacy protections that should withstand current and future trends.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
From the Pinkerton private detectives of the 1850s, to the closed-circuit cameras and email monitoring of the 1990s, to contemporary apps that quantify the productivity of workers, American employers have increasingly sought to track the activities of their employees. Along with economic and technological limits, the law has always been presumed as a constraint on these surveillance activities. Recently, technological advancements in several fields – data analytics, communications capture, mobile device design, DNA testing, and biometrics – have dramatically expanded capacities for worker surveillance both on and off the job. At the same time, the cost of many forms of surveillance has dropped significantly, while new technologies make the surveillance of workers even more convenient and accessible. This leaves the law as the last meaningful avenue to delineate boundaries for worker surveillance.
In this Article, we examine the effectiveness of the law as a check on worker surveillance, given recent technological innovations. In particular, we focus on two popular trends in worker tracking – productivity apps and worker wellness programs – to argue that current legal constraints are insufficient and may leave American workers at the mercy of 24/7 employer monitoring. We then propose a new comprehensive framework for worker privacy protections that should withstand current and future trends.
In this Article, we examine the effectiveness of the law as a check on worker surveillance, given recent technological innovations. In particular, we focus on two popular trends in worker tracking – productivity apps and worker wellness programs – to argue that current legal constraints are insufficient and may leave American workers at the mercy of 24/7 employer monitoring. We then propose a new comprehensive framework for worker privacy protections that should withstand current and future trends.