Li, Tiffany
Privacy in Pandemic: Law, Technology, and Public Health in the COVID-19 Crisis Journal Article
In: Loyola University Chicago Law Journal, vol. 52, no. 3, pp. 767, 2021, ISSN: 0024-7081.
@article{li_privacy_2021,
title = {Privacy in Pandemic: Law, Technology, and Public Health in the COVID-19 Crisis},
author = {Tiffany Li},
url = {https://lawecommons.luc.edu/luclj/vol52/iss3/5},
issn = {0024-7081},
year = {2021},
date = {2021-01-01},
journal = {Loyola University Chicago Law Journal},
volume = {52},
number = {3},
pages = {767},
abstract = {The COVID-19 pandemic has caused millions of deaths and disastrous consequences around the world, with lasting repercussions for every field of law, including privacy and technology. The unique characteristics of this pandemic have precipitated an increase in use of new technologies, including remote communications platforms, healthcare robots, and medical AI. Public and private actors alike are using new technologies, like heat sensing, and technologically influenced programs, like contact tracing, leading to a rise in government and corporate surveillance in sectors like healthcare, employment, education, and commerce. Advocates have raised the alarm for privacy and civil liberties violations, but the emergency nature of the pandemic has drowned out many concerns. This Article is the first comprehensive account of privacy in pandemic that maps the terrain of privacy impacts related to technology and public health responses to the COVID-19 crisis. Many have written on the general need for better health privacy protections, education privacy protections, consumer privacy protections, and protections against government and corporate surveillance. However, this Article is the first comprehensive article to examine these problems of privacy and technology specifically in light of the pandemic, arguing that the lens of the pandemic exposes the need for both wide-scale and small-scale reform of privacy law. This Article approaches these problems with a focus on technical realities and social salience, and with a critical awareness of digital and political inequities, crafting normative recommendations with these concepts in mind. Understanding privacy in this time of pandemic is critical for law and policymaking in the near future and for the long-term goals of creating a future society that protects both civil liberties and public health. It is also important to create a contemporary scholarly understanding of privacy in pandemic at this moment in time, as a matter of historical record. By examining privacy in pandemic, in the midst of pandemic, this Article seeks to create a holistic scholarly foundation for future work on privacy, technology, public health, and legal responses to global crises.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Blasimme, Alessandro; Vayena, Effy
The Ethics of AI in Biomedical Research, Patient Care, and Public Health Book Section
In: Dubber, Markus D.; Pasquale, Frank; Das, Sunit (Ed.): The Oxford Handbook of Ethics of AI, Oxford University Press, 2020, ISBN: 978-0-19-006739-7.
@incollection{blasimme_ethics_2020,
title = {The Ethics of AI in Biomedical Research, Patient Care, and Public Health},
author = {Alessandro Blasimme and Effy Vayena},
editor = {Markus D. Dubber and Frank Pasquale and Sunit Das},
url = {https://doi.org/10.1093/oxfordhb/9780190067397.013.45},
doi = {10.1093/oxfordhb/9780190067397.013.45},
isbn = {978-0-19-006739-7},
year = {2020},
date = {2020-07-01},
urldate = {2024-10-21},
booktitle = {The Oxford Handbook of Ethics of AI},
publisher = {Oxford University Press},
abstract = {This chapter explores ethical issues raised by the use of artificial intelligence (AI) in the domain of biomedical research, healthcare provision, and public health. The litany of ethical challenges that AI in medicine raises cannot be addressed sufficiently by current regulatory and ethical frameworks. The chapter then advances the systemic oversight approach as a governance blueprint, which is based on six principles offering guidance as to the desirable features of oversight structures and processes in the domain of data-intense biomedicine: adaptivity, flexibility, inclusiveness, reflexivity, responsiveness, and monitoring (AFIRRM). In the research domain, ethical review committees will have to incorporate reflexive assessment of the scientific and social merits of AI-driven research and, as a consequence, will have to open their ranks to new professional figures such as social scientists. In the domain of patient care, clinical validation is a crucial issue. Hospitals could equip themselves with “clinical AI oversight bodies” charged with the task of advising clinical administrators. Meanwhile, in the public health sphere, the new level of granularity enabled by AI in disease surveillance or health promotion will have to be negotiated at the level of targeted communities.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Naudé, Wim
Artificial Intelligence against COVID-19: An Early Review Technical Report
IZA - Institute of Labor Economics 2020.
@techreport{naude_artificial_2020,
title = {Artificial Intelligence against COVID-19: An Early Review},
author = {Wim Naudé},
url = {http://www.jstor.org/stable/resrep60040},
year = {2020},
date = {2020-01-01},
urldate = {2024-10-21},
institution = {IZA - Institute of Labor Economics},
abstract = {Artificial Intelligence (AI) is a potentially powerful tool in the fight against the COVID-19 pandemic. Since the outbreak of the pandemic, there has been a scramble to use AI. This article provides an early, and necessarily selective, review discussing the contribution of AI to the fight against COVID-19, as well as the current constraints on these contributions. Six areas where AI can contribute to the fight against COVID-19 are discussed, namely i) early warnings and alerts, ii) tracking and prediction, iii) data dashboards, iv) diagnosis and prognosis, v) treatments and cures, and vi) social control. It is concluded that AI has not yet been impactful against COVID-19. Its use is hampered by a lack of data, and by too much data. Overcoming these constraints will require a careful balance between data privacy and public health, and rigorous human-AI interaction. It is unlikely that these will be addressed in time to be of much help during the present pandemic. In the meantime, extensive gathering of diagnostic data on who is infectious will be essential to save lives, train AI, and limit economic damages.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Gerke, Sara; Minssen, Timo; Cohen, Glenn
Chapter 12 - Ethical and legal challenges of artificial intelligence-driven healthcare Book Section
In: Bohr, Adam; Memarzadeh, Kaveh (Ed.): Artificial Intelligence in Healthcare, pp. 295–336, Academic Press, 2020, ISBN: 978-0-12-818438-7.
@incollection{gerke_chapter_2020,
title = {Chapter 12 - Ethical and legal challenges of artificial intelligence-driven healthcare},
author = {Sara Gerke and Timo Minssen and Glenn Cohen},
editor = {Adam Bohr and Kaveh Memarzadeh},
url = {https://www.sciencedirect.com/science/article/pii/B9780128184387000125},
doi = {10.1016/B978-0-12-818438-7.00012-5},
isbn = {978-0-12-818438-7},
year = {2020},
date = {2020-01-01},
urldate = {2024-10-21},
booktitle = {Artificial Intelligence in Healthcare},
pages = {295–336},
publisher = {Academic Press},
abstract = {This chapter will map the ethical and legal challenges posed by artificial intelligence (AI) in healthcare and suggest directions for resolving them. Section 1 will briefly clarify what AI is and Section 2 will give an idea of the trends and strategies in the United States (US) and Europe, thereby tailoring the discussion to the ethical and legal debate of AI-driven healthcare. This will be followed in Section 3 by a discussion of four primary ethical challenges, namely, (1) informed consent to use, (2) safety and transparency, (3) algorithmic fairness and biases, and (4) data privacy. Section 4 will then analyze five legal challenges in the US and Europe: (1) safety and effectiveness, (2) liability, (3) data protection and privacy, (4) cybersecurity, and (5) intellectual property law. Finally, Section 5 will summarize the major conclusions and especially emphasize the importance of building an AI-driven healthcare system that is successful and promotes trust and the motto Health AIs for All of Us.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Marks, Mason
Emergent Medical Data: Health Information Inferred by Artificial Intelligence Journal Article
In: UC Irvine Law Review, vol. 11, no. 4, pp. 995–1066, 2020.
@article{marks_emergent_2020,
title = {Emergent Medical Data: Health Information Inferred by Artificial Intelligence},
author = {Mason Marks},
url = {https://heinonline.org/HOL/P?h=hein.journals/ucirvlre11&i=1012},
year = {2020},
date = {2020-01-01},
urldate = {2024-10-21},
journal = {UC Irvine Law Review},
volume = {11},
number = {4},
pages = {995–1066},
abstract = {Artificial intelligence can infer health data from people’s behavior even when their behavior has no apparent connection to their health. AI can monitor one’s location to track the spread of infectious disease, scrutinize retail purchases to identify pregnant customers, and analyze social media to predict who might attempt suicide. These feats are possible because in modern societies, people continuously interact with internet-enabled software and devices. Smartphones, wearables, and online platforms monitor people’s actions and produce digital traces, the electronic remnants of their behavior.
In their raw form, digital traces might not be very interesting or useful; one’s location, retail purchases, and internet browsing habits are relatively mundane data points. However, AI can enhance their value by transforming them into something more useful—emergent medical data. EMD is health information inferred by artificial intelligence from otherwise trivial digital traces.
This Article describes how EMD-based profiling is increasingly promoted as a solution to public health crises such as the COVID-19 pandemic, gun violence, and the opioid crisis. However, there is little evidence to show that EMD-based profiling works. Even worse, it can cause significant harm, and current privacy and data protection laws contain loopholes that allow public and private entities to mine EMD without people’s knowledge or consent.
After describing the risks and benefits of EMD mining and profiling, the Article proposes six different ways of conceptualizing these practices. It concludes with preliminary recommendations for effective regulation. Potential options include banning or restricting the collection of digital traces, regulating EMD mining algorithms, and restricting how EMD can be used once it is produced.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Morley, Jessica; Machado, Caio; Burr, Christopher; Cowls, Josh; Taddeo, Mariarosaria; Floridi, Luciano
The Debate on the Ethics of AI in Health Care: A Reconstruction and Critical Review Miscellaneous
2019.
@misc{morley_debate_2019,
title = {The Debate on the Ethics of AI in Health Care: A Reconstruction and Critical Review},
author = {Jessica Morley and Caio Machado and Christopher Burr and Josh Cowls and Mariarosaria Taddeo and Luciano Floridi},
url = {https://papers.ssrn.com/abstract=3486518},
doi = {10.2139/ssrn.3486518},
year = {2019},
date = {2019-11-01},
urldate = {2024-10-21},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {Healthcare systems across the globe are struggling with increasing costs and worsening outcomes. This presents those responsible for overseeing healthcare with a challenge. Increasingly, policymakers, politicians, clinical entrepreneurs and computer and data scientists argue that a key part of the solution will be ‘Artificial Intelligence’ (AI) – particularly Machine Learning (ML). This argument stems not from the belief that all healthcare needs will soon be taken care of by “robot doctors.” Instead, it is an argument that rests on the classic counterfactual definition of AI as an umbrella term for a range of techniques that can be used to make machines complete tasks in a way that would be considered intelligent were they to be completed by a human. Automation of this nature could offer great opportunities for the improvement of healthcare services and ultimately patients’ health by significantly improving human clinical capabilities in diagnosis, drug discovery, epidemiology, personalised medicine, and operational efficiency. However, if these AI solutions are to be embedded in clinical practice, then at least three issues need to be considered: the technical possibilities and limitations; the ethical, regulatory and legal framework; and the governance framework. In this article, we report on the results of a systematic analysis designed to provide a clear overview of the second of these elements: the ethical, regulatory and legal framework. We find that ethical issues arise at six levels of abstraction (individual, interpersonal, group, institutional, sectoral, and societal) and can be categorised as epistemic, normative, or overarching. We conclude by stressing how important it is that the ethical challenges raised by implementing AI in healthcare settings are tackled proactively rather than reactively and map the key considerations for policymakers to each of the ethical concerns highlighted.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Johnson, Sandra L. J.
AI, Machine Learning, and Ethics in Health Care Journal Article
In: Journal of Legal Medicine, vol. 39, no. 4, pp. 427–441, 2019, ISSN: 0194-7648, (Publisher: Taylor & Francis).
@article{johnson_ai_2019,
title = {AI, Machine Learning, and Ethics in Health Care},
author = {Sandra L. J. Johnson},
url = {https://doi.org/10.1080/01947648.2019.1690604},
doi = {10.1080/01947648.2019.1690604},
issn = {0194-7648},
year = {2019},
date = {2019-10-01},
urldate = {2024-10-21},
journal = {Journal of Legal Medicine},
volume = {39},
number = {4},
pages = {427–441},
abstract = {In the era of technological advancement, referred to by some as the 4th industrial revolution, computer and data science is having a significant impact in healthcare. The healthcare profession is facing ethical issues that we have not had to consider in the past. This article is based on the presentation at the 59th Annual Conference of the American College of Legal Medicine in Los Angeles in February 2019. It was presented as a call to action for the American and Australasian Colleges of Legal Medicine to play a role in this rapidly expanding technology and its application in healthcare. It discusses broad aspects of artificial intelligence (AI) and machine learning (ML) in healthcare and aims to draw attention to ethical considerations in the use of this technology. There is a need for ethical guidelines, which will require a collaborative effort across many disciplines in medicine and involve input from allied health, computer/data scientists, program designers and various stakeholders in government, as well as the business sector. The ultimate aim is to ensure that best practice principles are used when applying AI and ML to patient data in healthcare systems.},
note = {Publisher: Taylor & Francis},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pasquale, Frank
Professional Judgment in an Era of Artificial Intelligence and Machine Learning Journal Article
In: boundary 2, vol. 46, no. 1, pp. 73–101, 2019, ISSN: 0190-3659.
@article{pasquale_professional_2019,
title = {Professional Judgment in an Era of Artificial Intelligence and Machine Learning},
author = {Frank Pasquale},
url = {https://doi.org/10.1215/01903659-7271351},
doi = {10.1215/01903659-7271351},
issn = {0190-3659},
year = {2019},
date = {2019-02-01},
urldate = {2024-10-21},
journal = {boundary 2},
volume = {46},
number = {1},
pages = {73–101},
abstract = {Though artificial intelligence (AI) in healthcare and education now accomplishes diverse tasks, there are two features that tend to unite the information processing behind efforts to substitute it for professionals in these fields: reductionism and functionalism. True believers in substitutive automation tend to model work in human services by reducing the professional role to a set of behaviors initiated by some stimulus, which are intended to accomplish some predetermined goal, or maximize some measure of well-being. However, true professional judgment hinges on a way of knowing the world that is at odds with the epistemology of substitutive automation. Instead of reductionism, an encompassing holism is a hallmark of professional practice—an ability to integrate facts and values, the demands of the particular case and prerogatives of society, and the delicate balance between mission and margin. Any presently plausible vision of substituting AI for education and health-care professionals would necessitate a corrosive reductionism. The only way these sectors can progress is to maintain, at their core, autonomous professionals capable of carefully intermediating between technology and the patients it would help treat, or the students it would help learn.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Froomkin, A. Michael; Kerr, Ian; Pineau, Joelle
When AIs Outperform Doctors: Confronting the Challenges of a Tort-Induced over-Reliance on Machine Learning Journal Article
In: Arizona Law Review, vol. 61, no. 1, pp. 33–100, 2019.
@article{froomkin_when_2019,
title = {When AIs Outperform Doctors: Confronting the Challenges of a Tort-Induced over-Reliance on Machine Learning},
author = {A. Michael Froomkin and Ian Kerr and Joelle Pineau},
url = {https://heinonline.org/HOL/P?h=hein.journals/arz61&i=40},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-21},
journal = {Arizona Law Review},
volume = {61},
number = {1},
pages = {33–100},
abstract = {Someday, perhaps soon, diagnostics generated by machine learning (ML) will have demonstrably better success rates than those generated by human doctors. What will the dominance of ML diagnostics mean for medical malpractice law, for the future of medical service provision, for the demand for certain kinds of doctors, and—in the long run—for the quality of medical diagnostics itself? This Article argues that once ML diagnosticians, such as those based on neural networks, are shown to be superior, existing medical malpractice law will require superior ML-generated medical diagnostics as the standard of care in clinical settings. Further, unless implemented carefully, a physician’s duty to use ML systems in medical diagnostics could, paradoxically, undermine the very safety standard that malpractice law set out to achieve. Although at first doctor + machine may be more effective than either alone because humans and ML systems might make very different kinds of mistakes, in time, as ML systems improve, effective ML could create overwhelming legal and ethical pressure to delegate the diagnostic process to the machine. Ultimately, a similar dynamic might extend to treatment also. If we reach the point where the bulk of clinical outcomes collected in databases are ML-generated diagnoses, this may result in future decisions that are not easily audited or understood by human doctors. Given the well-documented fact that treatment strategies are often not as effective when deployed in clinical practice compared to preliminary evaluation, the lack of transparency introduced by the ML algorithms could lead to a decrease in quality of care. This Article describes salient technical aspects of this scenario particularly as it relates to diagnosis and canvasses various possible technical and legal solutions that would allow us to avoid these unintended consequences of medical malpractice law. Ultimately, we suggest there is a strong case for altering existing medical liability rules to avoid a machine-only diagnostic regime. We argue that the appropriate revision to the standard of care requires maintaining meaningful participation in the loop by physicians.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cohen, I. Glenn
Informed Consent and Medical Artificial Intelligence: What to Tell the Patient? Symposium: Law and the Nation's Health Journal Article
In: Georgetown Law Journal, vol. 108, no. 6, pp. 1425–1470, 2019.
@article{cohen_informed_2019,
title = {Informed Consent and Medical Artificial Intelligence: What to Tell the Patient? Symposium: Law and the Nation's Health},
author = {I. Glenn Cohen},
url = {https://heinonline.org/HOL/P?h=hein.journals/glj108&i=1444},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-21},
journal = {Georgetown Law Journal},
volume = {108},
number = {6},
pages = {1425–1470},
abstract = {Imagine you are a patient who has been diagnosed with prostate cancer. The two main approaches to treating it in the United States are active surveillance versus the surgical option of radical prostatectomy. Your physician recommends the surgical option, and spends considerable time explaining the steps in the surgery, the benefits of (among other things) eliminating the tumor and the risks of (among other things) erectile dysfunction and urinary incontinence after the surgery. What your physician does not tell you is that she has arrived at her recommendation of prostatectomy over active surveillance based on the analysis of an Artificial Intelligence (AI)/Machine Learning (ML) system, which recommended this treatment plan based on analysis of your age, tumor size, and other personal characteristics found in your electronic health record. Has the doctor secured informed consent from a legal perspective? From an ethical perspective? If the doctor actually chose to “overrule” the AI system, and the doctor fails to tell you that, has she violated your legal or ethical right to informed consent? If you were to find out that the AI/ML system was used to make recommendations on your care and no one told you, how would you feel? Well, come to think of it, do you know whether an AI/ML system was used the last time you saw a physician?
This Article, part of a Symposium in the Georgetown Law Journal, is the first to examine in depth how medical AI/ML interfaces with our concept of informed consent. Part I provides a brief primer on medical Artificial Intelligence and Machine Learning. Part II sets out the core and penumbra of U.S. informed consent law and then seeks to determine to what extent AI/ML involvement in a patient’s health should be disclosed under the current doctrine. Part III examines whether the current doctrine “has it right,” examining more openly empirical and normative approaches to the question.
To forefront my conclusions: while there is some play in the joints, my best reading of the existing legal doctrine is that in general, liability will not lie for failing to inform patients about the use of medical AI/ML to help formulate treatment recommendations. There are a few situations where the doctrine may be more capacious, which I try to draw out (such as when patients inquire, when the medical AI/ML is more opaque, when it is given an outsized role in the final decision-making, or when the AI/ML is used to reduce costs rather than improve patient health), though extending it even here is not certain. I also offer some thoughts on the question: if there is room in the doctrine (either via common law or legislative action), what would it be desirable for the doctrine to look like when it comes to medical AI/ML? I also briefly touch on the question of how the doctrine of informed consent should interact with concerns about biased training data for AI/ML.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Feldman, Robin C.; Aldana, Ehrik; Stein, Kara
Artificial Intelligence in the Health Care Space: How We Can Trust What We Cannot Know Journal Article
In: Stanford Law & Policy Review, vol. 30, no. 2, pp. 399–420, 2019.
@article{feldman_artificial_2019,
title = {Artificial Intelligence in the Health Care Space: How We Can Trust What We Cannot Know},
author = {Robin C. Feldman and Ehrik Aldana and Kara Stein},
url = {https://heinonline.org/HOL/P?h=hein.journals/stanlp30&i=415},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-21},
journal = {Stanford Law & Policy Review},
volume = {30},
number = {2},
pages = {399–420},
abstract = {As AI moves rapidly into the health care field, it promises to revolutionize and transform our approach to medical treatment. The black-box nature of AI, however, produces a shiver of discomfort for many people. How can we trust our health, let alone our very lives, to decisions whose pathways are unknown and impenetrable?
As challenging as these questions may be, they are not insurmountable. And, in fact, the health care field provides the perfect ground for finding our way through these challenges. How can that be? Why would we suggest that a circumstance in which we are putting our lives on the line is the perfect place to learn to trust AI? The answer is quite simple. Health care always has been a place where individuals must put their faith in that which they do not fully understand.
Consider the black box nature of medicine itself. Although there is much we understand about the way in which a drug or a medical treatment works, there is much that we do not. In modern society, however, most people have little difficulty trusting their life to incomprehensible treatments.
This article suggests that the pathways we use to place our trust in medicine provide useful models for learning to trust AI. As we stand on the brink of the AI revolution, our challenge is to create the structures and expertise that give all of society confidence in decision-making and information integrity.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Terry, Nicolas
Of Regulating Healthcare AI and Robots Journal Article
In: Yale Journal of Law and Technology, vol. 21, no. Special Issue, pp. 133–190, 2019.
@article{terry_regulating_2019,
title = {Of Regulating Healthcare AI and Robots},
author = {Nicolas Terry},
url = {https://heinonline.org/HOL/P?h=hein.journals/yjolt21&i=515},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-21},
journal = {Yale Journal of Law and Technology},
volume = {21},
number = {Special Issue},
pages = {133–190},
abstract = {This article argues that advances in healthcare artificial intelligence (AI) will seriously challenge the robustness and appropriateness of our current healthcare regulatory models. Initially healthcare AI will join other technologies such as big data and mobile health apps in highlighting the current deficiencies in our regulatory models (particularly data protection). In the near future they will challenge regulatory models that use binary formulations such as “safe” or “unsafe.” The regulation of AI will require some fresh thinking. Underpinned by broadly embraced ethical and moral values, future AI regulation must be holistic, universal, contextually aware, and responsive to what will be major shifts in the man-machine relationship. The article seeks to provide context for these discussions by suggesting a typology for the healthcare AI technologies of our present and near future, a typology based in part on their substitutive effects. That is followed by a critical look at the existing healthcare regulatory structure as it would be applied to AI. The core of the article then suggests the imperatives for a new regulatory structure, one that relies less on the senses that we know the “practice of medicine” or “device” when we see it, and more on generally accepted normative principles. Those imperatives include quality, safety, cost-effectiveness, improved data protection, protections against discrimination and in support of health equity.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chung, Jason
What Should We Do About Artificial Intelligence in Health Care? Miscellaneous
2018.
@misc{chung_what_2018,
title = {What Should We Do About Artificial Intelligence in Health Care?},
author = {Jason Chung},
url = {https://papers.ssrn.com/abstract=3113655},
year = {2018},
date = {2018-01-01},
urldate = {2024-10-21},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {Artificial intelligence (AI) has become an important driver of growth and investment in the health care industry. In perhaps the most eye-catching example, industry heavyweights such as IBM are leveraging the promise of AI to market diagnostic systems such as Watson for Oncology that offer personalized treatment advice.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Price, W. Nicholson II
Artificial Intelligence in Health Care: Applications and Legal Issues Journal Article
In: SciTech Lawyer, vol. 14, no. 1, pp. 10–13, 2017.
@article{price_artificial_2017,
title = {Artificial Intelligence in Health Care: Applications and Legal Issues},
author = {W. Nicholson II Price},
url = {https://heinonline.org/HOL/P?h=hein.aba/scitla0014&i=10},
year = {2017},
date = {2017-01-01},
urldate = {2024-10-21},
journal = {SciTech Lawyer},
volume = {14},
number = {1},
pages = {10–13},
abstract = {Artificial intelligence (AI) is rapidly moving to change the healthcare system. Driven by the juxtaposition of big data and powerful machine learning techniques—terms I will explain momentarily—innovators have begun to develop tools to improve the process of clinical care, to advance medical research, and to improve efficiency. These tools rely on algorithms, programs created from healthcare data that can make predictions or recommendations. However, the algorithms themselves are often too complex for their reasoning to be understood or even stated explicitly. Such algorithms may be best described as “black-box.” This article briefly describes the concept of AI in medicine, including several possible applications, then considers its legal implications in four areas of law: regulation, tort, intellectual property, and privacy.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pasquale, Frank
Redescribing Health Privacy: The Importance of Information Policy Journal Article
In: Houston Journal of Health Law & Policy, vol. 14, pp. 95–128, 2014.
@article{pasquale_redescribing_2014,
title = {Redescribing Health Privacy: The Importance of Information Policy},
author = {Frank Pasquale},
url = {https://heinonline.org/HOL/P?h=hein.journals/hhpol14&i=109},
year = {2014},
date = {2014-01-01},
urldate = {2024-10-21},
journal = {Houston Journal of Health Law & Policy},
volume = {14},
pages = {95–128},
abstract = {Current conversations about health information policy often tend to be based on three broad assumptions. First, many perceive a tension between regulation and innovation. We often hear that privacy regulations are keeping researchers, companies, and providers from aggregating the data they need to promote innovation. Second, aggregation of fragmented data is seen as a threat to its proper regulation, creating the risk of breaches and other misuse. Third, a prime directive for technicians and policymakers is to give patients ever more granular methods of control over data. This article questions and complicates those assumptions, which I deem (respectively) the Privacy Threat to Research, the Aggregation Threat to Privacy, and the Control Solution.
This article is also intended to enrich our concepts of “fragmentation” and “integration” in health care. There is a good deal of sloganeering around “firewalls” and “vertical integration” as idealized implementations of “fragmentation” and “integration” (respectively). The problem, though, is that terms like these (as well as “disruption”) are insufficiently normative to guide large-scale health system change. They describe, but they do not adequately prescribe.
By examining those instances where: a) regulation promotes innovation, and b) increasing (some kinds of) availability of data actually enhances security, confidentiality, and privacy protections, this article attempts to give a richer account of the ethics of fragmentation and integration in the U.S. health care system. But, it also has a darker side, highlighting the inevitable conflicts of values created in a “reputation society” driven by stigmatizing social sorting systems. Personal data control may exacerbate social inequalities. Data aggregation may increase both our powers of research and our vulnerability to breach. The health data policymaking landscape of the next decade will feature a series of intractable conflicts between these important social values.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pasquale, Frank
Grand Bargains for Big Data: The Emerging Law of Health Information Journal Article
In: Maryland Law Review, vol. 72, no. 3, pp. 682–772, 2012.
@article{pasquale_grand_2012,
title = {Grand Bargains for Big Data: The Emerging Law of Health Information},
author = {Frank Pasquale},
url = {https://heinonline.org/HOL/P?h=hein.journals/mllr72&i=700},
year = {2012},
date = {2012-01-01},
urldate = {2024-10-21},
journal = {Maryland Law Review},
volume = {72},
number = {3},
pages = {682–772},
abstract = {Health information technology can save lives, cut costs, and expand access to care. But its full promise will only be realized if policymakers broker a “grand bargain” between providers, patients, and administrative agencies. In exchange for subsidizing systems designed to protect intellectual property and secure personally identifiable information, health regulators should have full access to key data those systems collect.
Successful data-mining programs at the Centers for Medicare & Medicaid Services (“CMS”) provide one model. By requiring standardized collection of billing data and hiring private contractors to analyze it, CMS pioneered innovative techniques for punishing fraud. Now it must move beyond deterring illegal conduct and move toward data-driven promotion of best practices.
With this aim in mind, CMS is already subsidizing technology, but more than money is needed to optimize the collection, analysis, and use of data. Policymakers need to navigate intellectual property and privacy rights skillfully. They must condition current (and future) government support for providers and insurers on better collection and dissemination of health information. If they succeed, the law of health information might better incorporate public values than information law generally.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}