Šķilters, Jurģis; Pokrotnieks, Juris; Derovs, Aleksejs
Towards A Human-AI Hybrid Medicine: Future Medicine — A Hybrid System Where AI Complements Instead of Replaces Humans Journal Article
In: Proceedings of the Latvian Academy of Sciences. Section B. Natural, Exact, and Applied Sciences., vol. 78, no. 4, pp. 233–238, 2024.
@article{skilters_towards_2024,
title = {Towards A Human-AI Hybrid Medicine: Future Medicine — A Hybrid System Where AI Complements Instead of Replaces Humans},
author = {Jurģis Šķilters and Juris Pokrotnieks and Aleksejs Derovs},
url = {https://sciendo.com/article/10.2478/prolas-2024-0032},
doi = {10.2478/prolas-2024-0032},
year = {2024},
date = {2024-09-01},
urldate = {2024-11-22},
journal = {Proceedings of the Latvian Academy of Sciences. Section B. Natural, Exact, and Applied Sciences.},
volume = {78},
number = {4},
pages = {233–238},
abstract = {Our paper provides a critical overview of the advantages, disadvantages, uncertainties, and challenges regarding AI application in medicine. Without denying the importance of AI in medical applications, we argue for a hybrid and complementary view of future medical systems where powerful AI resources are integrated in and with human decision making.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Smith, Helen; Downer, John; Ives, Jonathan
Clinicians and AI use: where is the professional guidance? Journal Article
In: Journal of Medical Ethics, vol. 50, no. 7, pp. 437–441, 2024, ISSN: 0306-6800, 1473-4257, (Publisher: Institute of Medical Ethics; Section: Clinical ethics).
@article{smith_clinicians_2024,
title = {Clinicians and AI use: where is the professional guidance?},
author = {Helen Smith and John Downer and Jonathan Ives},
url = {https://jme.bmj.com/content/50/7/437},
doi = {10.1136/jme-2022-108831},
issn = {0306-6800, 1473-4257},
year = {2024},
date = {2024-07-01},
urldate = {2024-11-22},
journal = {Journal of Medical Ethics},
volume = {50},
number = {7},
pages = {437–441},
abstract = {With the introduction of artificial intelligence (AI) to healthcare, there is also a need for professional guidance to support its use. New (2022) reports from the National Health Service AI Lab & Health Education England focus on healthcare workers’ understanding of and confidence in AI clinical decision support systems (AI-CDDSs), and are concerned with developing trust in, and the trustworthiness of, these systems. While they offer guidance to aid developers and purchasers of such systems, they offer little specific guidance for the clinical users who will be required to use them in patient care.
This paper argues that clinical, professional and reputational safety will be risked if this deficit of professional guidance for clinical users of AI-CDDSs is not redressed. We argue it is not enough to develop training for clinical users without first establishing professional guidance regarding the rights and expectations of clinical users.
We conclude with a call to action for clinical regulators: to unite to draft guidance for users of AI-CDDS that helps manage clinical, professional and reputational risks. We further suggest that this exercise offers an opportunity to address fundamental issues in the use of AI-CDDSs; regarding, for example, the fair burden of responsibility for outcomes.},
note = {Publisher: Institute of Medical Ethics
Section: Clinical ethics},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Maroudas, Vasileios P.
Fault–Based Liability for Medical Malpractice in the Age of Artificial Intelligence: A Comparative Analysis of German and Greek Medical Liability Law in View of the Challenges Posed by AI Systems Journal Article
In: Review of European and Comparative Law, vol. 57, no. 2, pp. 135–169, 2024, ISSN: 2545-384X, (Number: 2).
@article{maroudas_faultbased_2024,
title = {Fault–Based Liability for Medical Malpractice in the Age of Artificial Intelligence: A Comparative Analysis of German and Greek Medical Liability Law in View of the Challenges Posed by AI Systems},
author = {Vasileios P. Maroudas},
url = {https://czasopisma.kul.pl/index.php/recl/article/view/17223},
doi = {10.31743/recl.17223},
issn = {2545-384X},
year = {2024},
date = {2024-06-01},
urldate = {2024-11-22},
journal = {Review of European and Comparative Law},
volume = {57},
number = {2},
pages = {135–169},
abstract = {The rapid developments in the field of AI pose intractable problems for the law of civil liability. The main question that arises in this context is whether a fault-based liability regime can provide sufficient protection to victims of harm caused by the use of AI. This article addresses this question specifically in relation to medical malpractice liability. Its main purpose is to outline the problems that autonomous systems pose for medical liability law, but more importantly, to determine whether and to what extent a fault-based system of medical liability can adequately address them. In order to approach this issue, a comparative examination of German and Greek law will be undertaken. These two systems, while similar in substantive terms, differ significantly at the level of the burden of proof. In this sense, their comparison serves as a good example to “test” the adequacy of the fault principle in relation to AI systems in the field of medicine, but also to illustrate the practical importance that rules on the allocation of the burden of proof can have in cases of damage caused by the use of AI. As will eventually become apparent, the main problem appears to lie not in the fault principle itself, which, for the time being, at least in the form of objectified negligence, seems to protect the patient adequately, but mainly in the general rule for the allocation of the burden of proof, which is precisely why the fault principle ends up working to the detriment of the patient.},
note = {Number: 2},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Savulescu, Julian; Giubilini, Alberto; Vandersluis, Robert; Mishra, Abhishek
Ethics of artificial intelligence in medicine Journal Article
In: Singapore Medical Journal, vol. 65, no. 3, pp. 150, 2024, ISSN: 0037-5675.
@article{savulescu_ethics_2024,
title = {Ethics of artificial intelligence in medicine},
author = {Julian Savulescu and Alberto Giubilini and Robert Vandersluis and Abhishek Mishra},
url = {https://journals.lww.com/smj/fulltext/2024/03000/ethics_of_artificial_intelligence_in_medicine.5.aspx},
doi = {10.4103/singaporemedj.SMJ-2023-279},
issn = {0037-5675},
year = {2024},
date = {2024-03-01},
urldate = {2024-11-22},
journal = {Singapore Medical Journal},
volume = {65},
number = {3},
pages = {150},
abstract = {This article reviews the main ethical issues that arise from the use of artificial intelligence (AI) technologies in medicine. Issues around trust, responsibility, risks of discrimination, privacy, autonomy, and potential benefits and harms are assessed. For better or worse, AI is a promising technology that can revolutionise healthcare delivery. It is up to us to make AI a tool for the good by ensuring that ethical oversight accompanies the design, development and implementation of AI technology in clinical practice.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Vo, Vinh; Chen, Gang; Aquino, Yves Saint James; Carter, Stacy M.; Do, Quynh Nga; Woode, Maame Esi
Multi-stakeholder preferences for the use of artificial intelligence in healthcare: A systematic review and thematic analysis Journal Article
In: Social Science & Medicine, vol. 338, pp. 116357, 2023, ISSN: 0277-9536.
@article{vo_multi-stakeholder_2023,
title = {Multi-stakeholder preferences for the use of artificial intelligence in healthcare: A systematic review and thematic analysis},
author = {Vinh Vo and Gang Chen and Yves Saint James Aquino and Stacy M. Carter and Quynh Nga Do and Maame Esi Woode},
url = {https://www.sciencedirect.com/science/article/pii/S0277953623007141},
doi = {10.1016/j.socscimed.2023.116357},
issn = {0277-9536},
year = {2023},
date = {2023-12-01},
urldate = {2024-11-22},
journal = {Social Science & Medicine},
volume = {338},
pages = {116357},
abstract = {Introduction
Despite the proliferation of Artificial Intelligence (AI) technology over the last decade, clinician, patient, and public perceptions of its use in healthcare raise a number of ethical, legal and social questions. We systematically review the literature on attitudes towards the use of AI in healthcare from patients, the general public and health professionals’ perspectives to understand these issues from multiple perspectives.
Methodology
A search for original research articles using qualitative, quantitative, and mixed methods published between 1 Jan 2001 and 24 Aug 2021 was conducted on six bibliographic databases. Data were extracted and classified into different themes representing views on: (i) knowledge and familiarity of AI, (ii) AI benefits, risks, and challenges, (iii) AI acceptability, (iv) AI development, (v) AI implementation, (vi) AI regulations, and (vii) human–AI relationship.
Results
The final search identified 7,490 different records, of which 105 publications were selected based on predefined inclusion/exclusion criteria. While the majority of patients, the general public and health professionals generally had a positive attitude towards the use of AI in healthcare, all groups indicated some perceived risks and challenges. Commonly perceived risks included data privacy; reduced professional autonomy; algorithmic bias; healthcare inequities; and greater burnout from the need to acquire AI-related skills. While patients had mixed opinions on whether healthcare workers would suffer job losses due to the use of AI, health professionals strongly indicated that AI would not be able to completely replace them in their professions. Both groups shared similar doubts about AI's ability to deliver empathic care. The need for AI validation, transparency, explainability, and patient and clinical involvement in the development of AI was emphasised. To help successfully implement AI in health care, most participants envisioned that an investment in training and education campaigns was necessary, especially for health professionals. Lack of familiarity, lack of trust, and regulatory uncertainties were identified as factors hindering AI implementation. Regarding AI regulations, key themes included data access and data privacy. While the general public and patients exhibited a willingness to share anonymised data for AI development, there remained concerns about sharing data with insurance or technology companies. One key domain under this theme was the question of who should be held accountable in the case of adverse events arising from using AI.
Conclusions
While overall positivity persists in attitudes and preferences toward AI use in healthcare, some prevalent problems require more attention. There is a need to go beyond addressing algorithm-related issues to look at the translation of legislation and guidelines into practice to ensure fairness, accountability, transparency, and ethics in AI.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hedderich, Dennis M.; Weisstanner, Christian; Cauter, Sofie Van; Federau, Christian; Edjlali, Myriam; Radbruch, Alexander; Gerke, Sara; Haller, Sven
Artificial intelligence tools in clinical neuroradiology: essential medico-legal aspects Journal Article
In: Neuroradiology, vol. 65, no. 7, pp. 1091–1099, 2023, ISSN: 1432-1920.
@article{hedderich_artificial_2023,
title = {Artificial intelligence tools in clinical neuroradiology: essential medico-legal aspects},
author = {Dennis M. Hedderich and Christian Weisstanner and Sofie Van Cauter and Christian Federau and Myriam Edjlali and Alexander Radbruch and Sara Gerke and Sven Haller},
url = {https://doi.org/10.1007/s00234-023-03152-7},
doi = {10.1007/s00234-023-03152-7},
issn = {1432-1920},
year = {2023},
date = {2023-07-01},
urldate = {2024-11-22},
journal = {Neuroradiology},
volume = {65},
number = {7},
pages = {1091–1099},
abstract = {Commercial software based on artificial intelligence (AI) is entering clinical practice in neuroradiology. Consequently, medico-legal aspects of using Software as a Medical Device (SaMD) become increasingly important. These medico-legal issues warrant an interdisciplinary approach and may affect the way we work in daily practice. In this article, we seek to address three major topics: medical malpractice liability, regulation of AI-based medical devices, and privacy protection in shared medical imaging data, thereby focusing on the legal frameworks of the European Union and the USA. As many of the presented concepts are very complex and, in part, remain yet unsolved, this article is not meant to be comprehensive but rather thought-provoking. The goal is to engage clinical neuroradiologists in the debate and equip them to actively shape these topics in the future.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nitiéma, Pascal
Artificial Intelligence in Medicine: Text Mining of Health Care Workers’ Opinions Journal Article
In: Journal of Medical Internet Research, vol. 25, no. 1, pp. e41138, 2023, (Publisher: JMIR Publications Inc., Toronto, Canada).
@article{nitiema_artificial_2023,
title = {Artificial Intelligence in Medicine: Text Mining of Health Care Workers’ Opinions},
author = {Pascal Nitiéma},
url = {https://www.jmir.org/2023/1/e41138},
doi = {10.2196/41138},
year = {2023},
date = {2023-01-01},
urldate = {2024-11-22},
journal = {Journal of Medical Internet Research},
volume = {25},
number = {1},
pages = {e41138},
abstract = {Background: Artificial intelligence (AI) is being increasingly adopted in the health care industry for administrative tasks, patient care operations, and medical research.
Objective: We aimed to examine health care workers’ opinions about the adoption and implementation of AI-powered technology in the health care industry.
Methods: Data were comments about AI posted on a web-based forum by 905 health care professionals from at least 77 countries, from May 2013 to October 2021. Structural topic modeling was used to identify the topics of discussion, and hierarchical clustering was performed to determine how these topics cluster into different groups.
Results: Overall, 12 topics were identified from the collected comments. These comments clustered into 2 groups: “impact of AI on health care system and practice” and “AI as a tool for disease screening, diagnosis, and treatment.” Topics associated with negative sentiments included concerns about AI replacing human workers, impact of AI on traditional medical diagnostic procedures (ie, patient history and physical examination), accuracy of the algorithm, and entry of IT companies into the health care industry. Concerns about the legal liability for using AI in treating patients were also discussed. Positive topics about AI included the opportunity offered by the technology for improving the accuracy of image-based diagnosis and for enhancing personalized medicine.
Conclusions: The adoption and implementation of AI applications in the health care industry are eliciting both enthusiasm and concerns about patient care quality and the future of health care professions. The successful implementation of AI-powered technologies requires the involvement of all stakeholders, including patients, health care organization workers, health insurance companies, and government regulatory agencies.},
note = {Publisher: JMIR Publications Inc., Toronto, Canada},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Poon, Aaron I F; Sung, Joseph J Y
Opening the black box of AI-Medicine Journal Article
In: Journal of Gastroenterology and Hepatology, vol. 36, no. 3, pp. 581–584, 2021, ISSN: 1440-1746, (_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/jgh.15384).
@article{poon_opening_2021,
title = {Opening the black box of AI-Medicine},
author = {Aaron I F Poon and Joseph J Y Sung},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/jgh.15384},
doi = {10.1111/jgh.15384},
issn = {1440-1746},
year = {2021},
date = {2021-01-01},
urldate = {2024-11-22},
journal = {Journal of Gastroenterology and Hepatology},
volume = {36},
number = {3},
pages = {581–584},
abstract = {One of the biggest challenges of utilizing artificial intelligence (AI) in medicine is that physicians are reluctant to trust and adopt something that they do not fully understand and regard as a “black box.” Machine learning (ML) can assist in reading radiological, endoscopic, and histological pictures, suggesting diagnoses, predicting disease outcomes, and even recommending therapeutic and surgical decisions. However, clinical adoption of these AI tools has been slow because of a lack of trust. Besides clinicians' doubts, patients' lack of confidence in AI-powered technologies also hampers development. While patients may accept the reality that human errors can occur, little tolerance of machine error is anticipated. In order to implement AI medicine successfully, the interpretability of ML algorithms needs to improve. Opening the black box in AI medicine needs to take a stepwise approach. Small steps of biological explanation and clinical experience in ML algorithms can help to build trust and acceptance. AI software developers will have to clearly demonstrate that when ML technologies are integrated into the clinical decision-making process, they can actually help to improve clinical outcomes. Enhancing the interpretability of ML algorithms is a crucial step in adopting AI in medicine.},
note = {_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/jgh.15384},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Johnson, Kevin B.; Wei, Wei-Qi; Weeraratne, Dilhan; Frisse, Mark E.; Misulis, Karl; Rhee, Kyu; Zhao, Juan; Snowdon, Jane L.
Precision Medicine, AI, and the Future of Personalized Health Care Journal Article
In: Clinical and Translational Science, vol. 14, no. 1, pp. 86–93, 2021, ISSN: 1752-8062, (_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/cts.12884).
@article{johnson_precision_2021,
title = {Precision Medicine, AI, and the Future of Personalized Health Care},
author = {Kevin B. Johnson and Wei-Qi Wei and Dilhan Weeraratne and Mark E. Frisse and Karl Misulis and Kyu Rhee and Juan Zhao and Jane L. Snowdon},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cts.12884},
doi = {10.1111/cts.12884},
issn = {1752-8062},
year = {2021},
date = {2021-01-01},
urldate = {2024-11-22},
journal = {Clinical and Translational Science},
volume = {14},
number = {1},
pages = {86–93},
abstract = {The convergence of artificial intelligence (AI) and precision medicine promises to revolutionize health care. Precision medicine methods identify phenotypes of patients with less-common responses to treatment or unique healthcare needs. AI leverages sophisticated computation and inference to generate insights, enables the system to reason and learn, and empowers clinician decision making through augmented intelligence. Recent literature suggests that translational research exploring this convergence will help solve the most difficult challenges facing precision medicine, especially those in which nongenomic and genomic determinants, combined with information from patient symptoms, clinical history, and lifestyles, will facilitate personalized diagnosis and prognostication.},
note = {_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/cts.12884},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tobia, Kevin; Nielsen, Aileen; Stremitzer, Alexander
When Does Physician Use of AI Increase Liability? Journal Article
In: Journal of Nuclear Medicine, vol. 62, no. 1, pp. 17–21, 2021, ISSN: 0161-5505, 2159-662X, (Publisher: Society of Nuclear Medicine; Section: Artificial Intelligence).
@article{tobia_when_2021,
title = {When Does Physician Use of AI Increase Liability?},
author = {Kevin Tobia and Aileen Nielsen and Alexander Stremitzer},
url = {https://jnm.snmjournals.org/content/62/1/17},
doi = {10.2967/jnumed.120.256032},
issn = {0161-5505, 2159-662X},
year = {2021},
date = {2021-01-01},
urldate = {2024-11-22},
journal = {Journal of Nuclear Medicine},
volume = {62},
number = {1},
pages = {17–21},
abstract = {An increasing number of automated and artificial intelligence (AI) systems make medical treatment recommendations, including personalized recommendations, which can deviate from standard care. Legal scholars argue that following such nonstandard treatment recommendations will increase liability in medical malpractice, undermining the use of potentially beneficial medical AI. However, such liability depends in part on lay judgments by jurors: when physicians use AI systems, in which circumstances would jurors hold physicians liable?
Methods: To determine potential jurors’ judgments of liability, we conducted an online experimental study of a nationally representative sample of 2,000 U.S. adults. Each participant read 1 of 4 scenarios in which an AI system provides a treatment recommendation to a physician. The scenarios varied the AI recommendation (standard or nonstandard care) and the physician’s decision (to accept or reject that recommendation). Subsequently, the physician’s decision caused harm. Participants then assessed the physician’s liability.
Results: Our results indicate that physicians who receive advice from an AI system to provide standard care can reduce the risk of liability by accepting, rather than rejecting, that advice, all else being equal. However, when an AI system recommends nonstandard care, there is no similar shielding effect of rejecting that advice and so providing standard care.
Conclusion: The tort law system is unlikely to undermine the use of AI precision medicine tools and may even encourage the use of these tools.},
note = {Publisher: Society of Nuclear Medicine
Section: Artificial Intelligence},
keywords = {},
pubstate = {published},
tppubtype = {article}
}