Koshiyama, Adriano; Kazim, Emre; Treleaven, Philip; Rai, Pete; Szpruch, Lukasz; Pavey, Giles; Ahamat, Ghazi; Leutner, Franziska; Goebel, Randy; Knight, Andrew; Adams, Janet; Hitrova, Christina; Barnett, Jeremy; Nachev, Parashkev; Barber, David; Chamorro-Premuzic, Tomas; Klemmer, Konstantin; Gregorovic, Miro; Khan, Shakeel; Lomas, Elizabeth; Hilliard, Airlie; Chatterjee, Siddhant
Towards algorithm auditing: managing legal, ethical and technological risks of AI, ML and associated algorithms Journal Article
In: Royal Society Open Science, vol. 11, no. 5, pp. 230859, 2024, (Publisher: Royal Society).
@article{koshiyama_towards_2024,
title = {Towards algorithm auditing: managing legal, ethical and technological risks of {AI}, {ML} and associated algorithms},
author = {Adriano Koshiyama and Emre Kazim and Philip Treleaven and Pete Rai and Lukasz Szpruch and Giles Pavey and Ghazi Ahamat and Franziska Leutner and Randy Goebel and Andrew Knight and Janet Adams and Christina Hitrova and Jeremy Barnett and Parashkev Nachev and David Barber and Tomas Chamorro-Premuzic and Konstantin Klemmer and Miro Gregorovic and Shakeel Khan and Elizabeth Lomas and Airlie Hilliard and Siddhant Chatterjee},
url = {https://royalsocietypublishing.org/doi/10.1098/rsos.230859},
doi = {10.1098/rsos.230859},
year = {2024},
date = {2024-05-01},
urldate = {2024-10-21},
journal = {Royal Society Open Science},
volume = {11},
number = {5},
pages = {230859},
abstract = {Business reliance on algorithms is becoming ubiquitous, and companies are increasingly concerned about their algorithms causing major financial or reputational damage. High-profile cases include Google’s AI algorithm for photo classification mistakenly labelling a black couple as gorillas in 2015 (Gebru 2020 In The Oxford handbook of ethics of AI, pp. 251–269), Microsoft’s AI chatbot Tay that spread racist, sexist and antisemitic speech on Twitter (now X) (Wolf et al. 2017 ACM Sigcas Comput. Soc. 47, 54–64 (doi:10.1145/3144592.3144598)), and Amazon’s AI recruiting tool being scrapped after showing bias against women. In response, governments are legislating and imposing bans, regulators fining companies and the judiciary discussing potentially making algorithms artificial ‘persons’ in law. As with financial audits, governments, business and society will require algorithm audits; formal assurance that algorithms are legal, ethical and safe. A new industry is envisaged: Auditing and Assurance of Algorithms (cf. data privacy), with the remit to professionalize and industrialize AI, ML and associated algorithms. The stakeholders range from those working on policy/regulation to industry practitioners and developers. We also anticipate the nature and scope of the auditing levels and framework presented will inform those interested in systems of governance and compliance with regulation/standards. Our goal in this article is to survey the key areas necessary to perform auditing and assurance and instigate the debate in this novel area of research and practice.},
note = {Publisher: Royal Society},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Shivram, Vivek
Auditing with AI: A Theoretical Framework for Applying Machine Learning Across the Internal Audit Lifecycle Journal Article
In: EDPACS, vol. 69, no. 1, pp. 22–40, 2024, ISSN: 0736-6981, (Publisher: Taylor & Francis _eprint: https://doi.org/10.1080/07366981.2024.2312025).
@article{shivram_auditing_2024,
title = {Auditing with {AI}: A Theoretical Framework for Applying Machine Learning Across the Internal Audit Lifecycle},
author = {Vivek Shivram},
url = {https://doi.org/10.1080/07366981.2024.2312025},
doi = {10.1080/07366981.2024.2312025},
issn = {0736-6981},
year = {2024},
date = {2024-01-01},
urldate = {2024-10-21},
journal = {EDPACS},
volume = {69},
number = {1},
pages = {22--40},
abstract = {Artificial Intelligence (hereinafter AI), and specifically, Machine Learning (hereinafter, ML), has shown tremendous potential to revolutionize the internal audit (hereinafter, IA) profession, from enabling audit coverage of entire test populations, to introducing objectivity in the analysis of key areas. However, prior literature shows that the multiplicity of innovation options can be overwhelming. This paper aims to offer a theoretical framework that would enable audit practitioners, within both industry and professional services, to consider how ML capabilities can be harnessed to their fullest potential across the internal audit lifecycle, from audit planning to reporting. The paper discusses how DA and ML capabilities relate to the internal audit function’s (hereinafter, IAF) remit, drawing from extant literature. In doing so, the paper identifies the most specific options available to IAFs to drive innovation across each segment of the audit lifecycle, leveraging various DA and ML techniques, and supports the assertion that auditors require a continuous innovation mind-set to be effective change agents. The paper also draws out the requirement for effective guardrails, especially with emerging technology, such as Generative AI. Finally, the paper discusses how the value arising from these efforts can be measured by IAFs.},
note = {Publisher: Taylor \& Francis},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ozili, Peterson K.
Big Data and Artificial Intelligence for Financial Inclusion: Benefits and Issues Book Section
In: Artificial Intelligence, Fintech, and Financial Inclusion, CRC Press, 2023, ISBN: 978-1-00-312520-4, (Num Pages: 10).
@incollection{ozili_big_2023,
  author    = {Ozili, Peterson K.},
  title     = {Big Data and Artificial Intelligence for Financial Inclusion: Benefits and Issues},
  booktitle = {Artificial Intelligence, Fintech, and Financial Inclusion},
  publisher = {CRC Press},
  isbn      = {978-1-00-312520-4},
  year      = {2023},
  date      = {2023-01-01},
  abstract  = {This chapter discusses the benefits and drawbacks of using big data and artificial intelligence (AI) to broaden access to financial services. Increased effectiveness and risk management for providers of financial services, access to innovative financial products and services for financially stable adults, a less cumbersome account-opening process for underbanked adults, and credit scores based on alternative data sources for those without bank accounts are just a few of the ways that AI and big data are facilitating financial inclusion. A dearth of qualified AI employees, increased unemployment in the financial ecosystem, the existence of unconscious bias in the design of AI systems, and other barriers brought on by data protection legislation are some of the issues with employing AI and big data for financial inclusion.},
  note      = {Num Pages: 10},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Commerford, Benjamin P.; Dennis, Sean A.; Joe, Jennifer R.; Ulla, Jenny W.
Man Versus Machine: Complex Estimates and Auditor Reliance on Artificial Intelligence Journal Article
In: Journal of Accounting Research, vol. 60, no. 1, pp. 171–201, 2022, ISSN: 1475-679X, (_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/1475-679X.12407).
@article{commerford_man_2022,
title = {Man Versus Machine: Complex Estimates and Auditor Reliance on Artificial Intelligence},
author = {Benjamin P. Commerford and Sean A. Dennis and Jennifer R. Joe and Jenny W. Ulla},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/1475-679X.12407},
doi = {10.1111/1475-679X.12407},
issn = {1475-679X},
year = {2022},
date = {2022-01-01},
urldate = {2024-10-21},
journal = {Journal of Accounting Research},
volume = {60},
number = {1},
pages = {171--201},
abstract = {Audit firms are investing billions of dollars to develop artificial intelligence (AI) systems that will help auditors execute challenging tasks (e.g., evaluating complex estimates). Although firms assume AI will enhance audit quality, a growing body of research documents that individuals often exhibit “algorithm aversion”—the tendency to discount computer-based advice more heavily than human advice, although the advice is identical otherwise. Therefore, we conduct an experiment to examine how algorithm aversion manifests in auditor judgments. Consistent with theory, we find that auditors receiving contradictory evidence from their firm's AI system (instead of a human specialist) propose smaller adjustments to management's complex estimates, particularly when management develops their estimates using relatively objective (vs. subjective) inputs. Our findings suggest auditor susceptibility to algorithm aversion could prove costly for the profession and financial statements users.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Noguer i Alonso, Miquel; Batres-Estrada, Gilberto; Yahiaoui, Ghozlane
A Meta-Learning approach to Model Uncertainty in Financial Time Series Miscellaneous
2021.
@misc{noguer_i_alonso_meta-learning_2021,
title = {A Meta-Learning approach to Model Uncertainty in Financial Time Series},
author = {{Noguer i Alonso}, Miquel and Batres-Estrada, Gilberto and Yahiaoui, Ghozlane},
url = {https://papers.ssrn.com/abstract=3814938},
doi = {10.2139/ssrn.3814938},
year = {2021},
date = {2021-03-01},
urldate = {2024-10-21},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {Financial markets have experienced several negative sigma events in recent years; these events occur with much more regularity than current risk models can predict. There is no guarantee that the training set's data generating process will be the same in the test set in finance. Mathematical models are designed to operate with unlimited and changing data, and yet, actual events keep making life hard for most models. The assumption of independent and identically distributed random variables and a stationary time series do not hold in reality. Over-reliance on historical data and backtesting of models is not a sufficient approach to overcome these challenges. Reinforcement-learning faces similar challenges when applied to financial time series. Out-of-distribution generalization is a problem that cannot be solved without assumptions on the data generating process. If the test data is arbitrary or unrelated to the training data, then generalization is not possible. Finding these particular principles could potentially help us build AI and financial modeling systems. N-Beats, Oreshkin et al. [2020], is a deep neural architecture with backward and forward residual links and a deep stack of fully-connected layers. N-Beats can be considered as a meta-learning model for time series prediction. Meta-Learning is a machine learning approach that intends to design models that can learn new skills or adapt to new environments rapidly with few training examples. We explore the performance of N-Beats and compare its performance with other deep learning models. The results are not conclusive in establishing N-Beats as a better model than the other models tested in this study. We show in this study that other neural network-based models offer similar performance.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Azzutti, Alessio; Ringe, Wolf-Georg; Stiehl, H. Siegfried
Machine Learning, Market Manipulation, and Collusion on Capital Markets: Why the "Black Box" Matters Journal Article
In: University of Pennsylvania Journal of International Law, vol. 43, no. 1, pp. 79–136, 2021.
@article{azzutti_machine_2021,
title = {Machine Learning, Market Manipulation, and Collusion on Capital Markets: Why the "Black Box" Matters},
author = {Alessio Azzutti and Wolf-Georg Ringe and H. Siegfried Stiehl},
url = {https://heinonline.org/HOL/P?h=hein.journals/upjiel43&i=83},
year = {2021},
date = {2021-01-01},
urldate = {2024-10-21},
journal = {University of Pennsylvania Journal of International Law},
volume = {43},
number = {1},
pages = {79--136},
abstract = {This Article offers a novel perspective on the implications of increasingly autonomous and “black box” algorithms, within the ramification of algorithmic trading, for the integrity of capital markets. Artificial intelligence (AI) and particularly its subfield of machine learning (ML) methods have gained immense popularity among the great public and achieved tremendous success in many real-life applications by leading to vast efficiency gains. In the financial trading domain, ML can augment human capabilities in price prediction, dynamic portfolio optimization, and other financial decision-making tasks. However, thanks to constant progress in the ML technology, the prospect of increasingly capable and autonomous agents to delegate operational tasks and even decision-making is now beyond mere imagination, thus opening up the possibility for approximating (truly) autonomous trading agents anytime soon.
Given these spectacular developments, this Article argues that such autonomous algorithmic traders may involve significant risks to market integrity, independent from their human experts, thanks to self-learning capabilities offered by state-of-the-art and innovative ML methods. Using the proprietary trading industry as a case study, we explore emerging threats to the application of established market abuse laws in the event of algorithmic market abuse, by taking an interdisciplinary stance between financial regulation, law and economics, and computational finance. Specifically, our analysis focuses on two emerging market abuse risks by autonomous algorithms: market manipulation and “tacit” collusion. We explore their likelihood to arise in global capital markets and evaluate related social harm as forms of market failures.
With these new risks in mind, this Article questions the adequacy of existing regulatory frameworks and enforcement mechanisms, as well as current legal rules on the governance of algorithmic trading, to cope with increasingly autonomous and ubiquitous algorithmic trading systems. We demonstrate how the “black box” nature of specific ML-powered algorithmic trading strategies can subvert existing market abuse laws, which are based upon traditional liability concepts and tests (such as “intent” and “causation”). We conclude by addressing the shortcomings of the present legal framework and develop a number of guiding principles to assist legal and policy reform in the spirit of promoting and safeguarding market integrity and safety.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Given these spectacular developments, this Article argues that such autonomous algorithmic traders may involve significant risks to market integrity, independent from their human experts, thanks to self-learning capabilities offered by state-of-the-art and innovative ML methods. Using the proprietary trading industry as a case study, we explore emerging threats to the application of established market abuse laws in the event of algorithmic market abuse, by taking an interdisciplinary stance between financial regulation, law and economics, and computational finance. Specifically, our analysis focuses on two emerging market abuse risks by autonomous algorithms: market manipulation and “tacit” collusion. We explore their likelihood to arise in global capital markets and evaluate related social harm as forms of market failures.
With these new risks in mind, this Article questions the adequacy of existing regulatory frameworks and enforcement mechanisms, as well as current legal rules on the governance of algorithmic trading, to cope with increasingly autonomous and ubiquitous algorithmic trading systems. We demonstrate how the “black box” nature of specific ML-powered algorithmic trading strategies can subvert existing market abuse laws, which are based upon traditional liability concepts and tests (such as “intent” and “causation”). We conclude by addressing the shortcomings of the present legal framework and develop a number of guiding principles to assist legal and policy reform in the spirit of promoting and safeguarding market integrity and safety.
Chander, Anupam
Artificial Intelligence and Trade Book Section
In: Burri, Mira (Ed.): Big Data and Global Trade Law, pp. 115–127, Cambridge University Press, Cambridge, 2021, ISBN: 978-1-108-84359-1.
@incollection{burri_artificial_2021,
title = {Artificial Intelligence and Trade},
author = {Anupam Chander},
editor = {Mira Burri},
url = {https://www.cambridge.org/core/books/big-data-and-global-trade-law/artificial-intelligence-and-trade/4A03E8C7FA10640DB3791FB1503EA7C9},
doi = {10.1017/9781108919234.008},
isbn = {978-1-108-84359-1},
year = {2021},
date = {2021-01-01},
urldate = {2024-10-21},
booktitle = {Big Data and Global Trade Law},
pages = {115--127},
publisher = {Cambridge University Press},
address = {Cambridge},
abstract = {Artificial Intelligence is already powering trade today. It is crossing borders, learning, making decisions, and operating cyber-physical systems. It underlies many of the services that are offered today – from customer service chatbots to customer relations software to business processes. The chapter considers AI regulation from the perspective of international trade law. It argues that foreign AI should be regulated by governments – indeed that AI must be ‘locally responsible’. The chapter refutes arguments that trade law should not apply to AI and shows how the WTO agreements might apply to AI using two hypothetical cases . The analysis reveals how the WTO agreements leave room for governments to insist on locally responsible AI, while at the same time promoting international trade powered by AI.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Cao, Longbing
AI in Finance: A Review Miscellaneous
2020.
@misc{cao_ai_2020,
title = {{AI} in Finance: A Review},
author = {Longbing Cao},
url = {https://papers.ssrn.com/abstract=3647625},
doi = {10.2139/ssrn.3647625},
year = {2020},
date = {2020-07-01},
urldate = {2024-10-21},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {The recent booming of AI in FinTech evidences the significant developments and potential of AI for making smart FinTech, economy, finance and society. AI-empowered smart FinTech has emerged as a sexy and increasingly critical area in AI, data science, economics, finance, and other relevant research disciplines and business domains. This trend was built on the long history of AI in finance, and the new-generation AI, data science and machine learning are fundamentally and seamlessly transforming the vision, missions, objectives, paradigms, theories, approaches, tools and social aspects of economics and finance and driving smart FinTech. AI is empowering more personalized and advanced and better, safer and newer mainstream and alternative economic-financial mechanisms, products, models, services, systems, and applications. This review summarizes the lasting research on AI in finance and focuses on creating a comprehensive, multidimensional and economic-financial problem-driven research landscape of the roles, research directions and opportunities of AI in new-generation FinTech and finance.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Omidi Firouzi, Hassan; Wang, Sean
A Fresh Look at Internal Audit Framework at the Age of Artificial Intelligence (AI) Miscellaneous
2020.
@misc{omidi_firouzi_fresh_2020,
title = {A Fresh Look at Internal Audit Framework at the Age of Artificial Intelligence ({AI})},
author = {Omidi Firouzi, Hassan and Wang, Sean},
url = {https://papers.ssrn.com/abstract=3595389},
doi = {10.2139/ssrn.3595389},
year = {2020},
date = {2020-05-01},
urldate = {2024-10-21},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {With the rise of Artificial Intelligence (AI), industries embraced or are preparing to embrace its potentials. Financial industries, banking industry in particular, are unleashing and harnessing AI powers in various business lines and functional departments. However, similar to other initiatives, AI brings its own opportunities and challenges including various legal and compliance risks. Banks are required to understand this technology clearly and mitigate potential risks associated with the applications of these tools. Introducing sound and transparent measures to mitigate the potential risks entails new initiatives from various lines of defense within banks. In this paper, our focus is devoted to the third line of defense also known as Internal Audit (IA) function. As a current industry practice, effective challenges for different stages of an AI model from data access, collection and compliance, model development and validation, to the deployment and integration of the model within the established IT systems are performed by different teams within Internal Audit function. These teams do not necessarily look at the end-to-end process in a joint effort but rather perform in solos. This approach has successfully been running until today, however, with AI in place this approach will not be as effective and efficient as used to be. The main rationale underlying this statement is that with AI tools one cannot effectively challenge the data part of the model (or the IT deployment) without knowing sufficient information about the model. In this paper, we introduce a unified model-centric framework for Internal Audit function to enable the third line of defense to perform effective challenge for AI tools and technology in a smooth and unified way. In this approach, model team is responsible (not only for the model audit part), in collaboration with other teams, for the end-to-end audit process of AI tools.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Zetzsche, Dirk A.; Arner, Douglas W.; Buckley, Ross P.; Tang, Brian
Artificial Intelligence in Finance: Putting the Human in the Loop Miscellaneous
2020.
@misc{zetzsche_artificial_2020,
title = {Artificial Intelligence in Finance: Putting the Human in the Loop},
author = {Dirk A. Zetzsche and Douglas W. Arner and Ross P. Buckley and Brian Tang},
url = {https://papers.ssrn.com/abstract=3531711},
doi = {10.2139/ssrn.3531711},
year = {2020},
date = {2020-02-01},
urldate = {2024-10-21},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {Finance has become one of the most globalized and digitized sectors of the economy. It is also one of the most regulated of sectors, especially since the 2008 Global Financial Crisis. Globalization, digitization and money are propelling AI in finance forward at an ever increasing pace. This paper develops a regulatory roadmap for understanding and addressing the increasing role of AI in finance, focusing on human responsibility: the idea of “putting the human in the loop” in order in particular to address “black box” issues. Part I maps the various use-cases of AI in finance, highlighting why AI has developed so rapidly in finance and is set to continue to do so. Part II then highlights the range of the potential issues which may arise as a result of the growth of AI in finance. Part III considers the regulatory challenges of AI in the context of financial services and the tools available to address them, and Part IV highlights the necessity of human involvement. We find that the use of AI in finance comes with three regulatory challenges: (1) AI increases information asymmetries regarding the capabilities and effects of algorithms between users, developers, regulators and consumers; (2) AI enhances data dependencies as different day’s data sources may alter operations, effects and impact; and (3) AI enhances interdependency, in that systems can interact with unexpected consequences, enhancing or diminishing effectiveness, impact and explainability. These issues are often summarized as the “black box” problem: no one understands how some AI operates or why it has done what it has done, rendering accountability impossible. Even if regulatory authorities possessed unlimited resources and expertise – which they clearly do not – regulating the impact of AI by traditional means is challenging. To address this challenge, we argue for strengthening the internal governance of regulated financial market participants through external regulation.
Part IV thus suggests that the most effective path forward involves regulatory approaches which bring the human into the loop, enhancing internal governance through external regulation. In the context of finance, the post-Crisis focus on personal and managerial responsibility systems provide a unique and important external framework to enhance internal responsibility in the context of AI, by putting a human in the loop through regulatory responsibility, augmented in some cases with AI review panels. This approach – AI-tailored manager responsibility frameworks, augmented in some cases by independent AI review committees, as enhancements to the traditional three lines of defence – is in our view likely to be the most effective means for addressing AI-related issues not only in finance – particularly “black box” problems – but potentially in any regulated industry.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Lu, Sylvia
Algorithmic Opacity, Private Accountability, and Corporate Social Disclosure in the Age of Artificial Intelligence Journal Article
In: Vanderbilt Journal of Entertainment & Technology Law, vol. 23, no. 1, pp. 99–160, 2020.
@article{lu_algorithmic_2020,
title = {Algorithmic Opacity, Private Accountability, and Corporate Social Disclosure in the Age of Artificial Intelligence},
author = {Sylvia Lu},
url = {https://heinonline.org/HOL/P?h=hein.journals/vanep23&i=111},
year = {2020},
date = {2020-01-01},
urldate = {2024-10-21},
journal = {Vanderbilt Journal of Entertainment \& Technology Law},
volume = {23},
number = {1},
pages = {99--160},
abstract = {Today, firms develop machine learning algorithms in nearly every industry to control human decisions, creating a structural tension between commercial opacity and democratic transparency. In many forms of business applications, advanced algorithms are technically complicated and privately owned, hiding from legal regimes and preventing public scrutiny, although they may demonstrate their erosion of democratic norms, damages to financial gains, and extending harms to stakeholders without warning. Nevertheless, because the inner workings and applications of algorithms are generally incomprehensible and protected as trade secrets, they can be completely shielded from public surveillance. One of the solutions to this conflict between algorithmic opacity and democratic transparency is an effective mechanism that incentivizes firms to engage in information disclosure for their algorithms.
This Article argues that the pressing problem of algorithmic opacity is due to the regulatory void of US disclosure regulations that fail to consider the informational needs of stakeholders in the age of AI. In a world of privately-owned algorithms, advanced algorithms as the primary source of decision-making power have produced various perils for the public and firms themselves, particularly in the context of the capital market. While the current disclosure framework has not considered the informational needs associated with algorithmic opacity, this Article argues that algorithmic disclosure under securities law could be used to promote private accountability and further public interest in sustainability.
First, as I discuss, advanced machine learning algorithms have been widely applied in AI systems in many critical industries, including financial services, medical services, and transportation services. Second, despite the growing pervasiveness of algorithms, the laws, particularly intellectual property laws, continue to encourage the existence of algorithmic opacity. Although the protection of trade secrecy in algorithms seems beneficial for firms to create competitive advantage, as I examine, it has proven deleterious for society, where democratic norms such as privacy, equality, and safety are now being compromised by invisible algorithms that no one can ever scrutinize. Third, although the emerging perils of algorithmic opacity are much more catastrophic and messier than before, the current disclosure framework in the context of corporate securities laws fails to consider the informational needs of the stakeholders for advanced algorithms in AI systems.
In this vein, through the lens of the US Securities and Exchange Commission (SEC) disclosure framework, this Article proposes a new disclosure framework for machine-learning-algorithm-based AI systems that considers the technical traits of advanced algorithms, potential dangers of AI systems, and regulatory governance systems in light of increasing AI incidents. Towards this goal, I discuss numerous disclosure topics, analyze key disclosure reports, and propose new principles to help reduce algorithmic opacity, including stakeholder consideration, sustainability consideration, comprehensible disclosure, and minimum necessary disclosure, which I argue can ultimately strike a balance between democratic values in transparency and private interests in opacity. This Article concludes with a discussion of the impacts, limitations, and possibilities of using the new disclosure framework to promote private accountability and corporate social responsibility in the AI era.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
This Article argues that the pressing problem of algorithmic opacity is due to the regulatory void of US disclosure regulations that fail to consider the informational needs of stakeholders in the age of AI. In a world of privately-owned algorithms, advanced algorithms as the primary source of decision-making power have produced various perils for the public and firms themselves, particularly in the context of the capital market. While the current disclosure framework has not considered the informational needs associated with algorithmic opacity, this Article argues that algorithmic disclosure under securities law could be used to promote private accountability and further public interest in sustainability.
First, as I discuss, advanced machine learning algorithms have been widely applied in AI systems in many critical industries, including financial services, medical services, and transportation services. Second, despite the growing pervasiveness of algorithms, the laws, particularly intellectual property laws, continue to encourage the existence of algorithmic opacity. Although the protection of trade secrecy in algorithms seems beneficial for firms to create competitive advantage, as I examine, it has proven deleterious for society, where democratic norms such as privacy, equality, and safety are now being compromised by invisible algorithms that no one can ever scrutinize. Third, although the emerging perils of algorithmic opacity are much more catastrophic and messier than before, the current disclosure framework in the context of corporate securities laws fails to consider the informational needs of the stakeholders for advanced algorithms in AI systems.
In this vein, through the lens of the US Securities and Exchange Commission (SEC) disclosure framework, this Article proposes a new disclosure framework for machine-learning-algorithm-based AI systems that considers the technical traits of advanced algorithms, potential dangers of AI systems, and regulatory governance systems in light of increasing AI incidents. Towards this goal, I discuss numerous disclosure topics, analyze key disclosure reports, and propose new principles to help reduce algorithmic opacity, including stakeholder consideration, sustainability consideration, comprehensible disclosure, and minimum necessary disclosure, which I argue can ultimately strike a balance between democratic values in transparency and private interests in opacity. This Article concludes with a discussion of the impacts, limitations, and possibilities of using the new disclosure framework to promote private accountability and corporate social responsibility in the AI era.
Lianos, Ioannis
Competition Law for the Digital Era: A Complex Systems’ Perspective Miscellaneous
2019.
@misc{lianos_competition_2019,
title = {Competition Law for the Digital Era: A Complex Systems’ Perspective},
author = {Ioannis Lianos},
url = {https://papers.ssrn.com/abstract=3492730},
doi = {10.2139/ssrn.3492730},
year = {2019},
date = {2019-08-01},
urldate = {2024-10-21},
publisher = {Social Science Research Network},
address = {Rochester, NY},
abstract = {As the global economy incurs a process of transformation by the ongoing ‘fourth industrial revolution’, competition law is traversing a ‘liminal’ moment, a period of transition during which the normal limits to thought, self-understanding and behaviour are relaxed, opening the way to novelty and imagination, construction and destruction. There is need for the discussion over the role of competition law in the digital era to be integrated to the broader debate over the new processes of value generation and capture in the era of digital capitalism and the complex economy to which it has given rise to. This complex digital economy is formed by a spider web of economic links, but also their underpinning societal relations, between different agents. However, competition law still lives in the simple world of neo-classical price theory (NPT) economics, which may not provide adequate tools in order to fully comprehend the various dimensions of the competition game. The emphasis put recently by competition authorities on multi-sided markets in order to analyse restrictions of competition in the data economy illustrates the agents’ changing roles and the complexity of their interactions, as the same agents can be at the same time consumers and producers while their personal data raw material for the value generation process. It becomes therefore essential to uncover the new value capture and value generation processes in operation in the digital economy, and draw lessons for the optimal design and enforcement of competition law, rather than take the established competition law framework as a given and try to stretch within it a quite complex reality that may not fit this Procrustean iron bed.
These approaches should engage with the complex economics of digital capitalism, and in particular the role of futurity and financialisation, personalisation and cybernetics. These new developments, first, call for a re-conceptualisation of the goals of competition law in the digital era, as competition law moves from the calm and predictable waters of ‘consumer welfare’, narrowly defined, to integrate considerations of income/wealth distribution, privacy and complex equality. Second, it also requires a revision of the current understanding of the nature of the competitive game, which only focuses on horizontal rivalry in product and eventually technology markets. This is of course an important dimension of competition, but hardly the most significant one in the current process of value generation and capture in the digital economy. Firms do not only compete on the product market dimension, but in the today’s financialised economy, probably the most important locus of competition is capital markets. The process of financialisation has important implications for the development of digital capitalism, an issue that the paper explores in detail for the first time in competition law and economics scholarship. Financial markets evaluate companies in view of expected returns in the not so near future, often linked to the emergence of bottlenecks or the perception that a firm holds important assets and resources (e.g. data, algorithms, specialised labour).
The role of financial markets’ evaluation in driving business strategies in the era of digital and financialised capitalism is linked to the ‘subtle shift of mindset’ in digital capitalism ‘from profit (and isolating mechanisms) to wealth creation (and the potential for asset appreciation)’ as value is created by investing in assets that will appreciate. Third, this calls for a consideration, not only of horizontal competition, but also of vertical competition, the competition for a higher percentage of the surplus value brought by innovation, and competition from complementary technologies that may challenge the lead position in the value chain of the incumbents (vertical innovation competition). Fairness considerations, among other reasons, may also lead competition authorities to not only focus on inter-platform/ecosystem competition but to also promote intra-platform/ecosystem competition, as this may be a significant element of the competitive game. To implement this broader focus of competition law, we need to develop adequate conceptual tools and methodologies. A recurrent problem is the narrow definition of market power in competition law, whose presence often triggers the competition law assessment, and which is also intrinsically linked to the step of market definition. This currently ignores possible restrictions of vertical competition, personalisation and the predictive role of digital platforms, which may become source of harm for consumers, the competitive process, or the public at large. It is important to engage with concepts of vertical power and the paper develops a typology of vertical power, combining in an overall conceptual framework the various concepts of non-structural power that have been used so far in competition law literature and some new ones (positional and architectural power).
This conceptualisation offers an overall theoretical framework for vertical power that is necessary for sound competition law enforcement, and which has been lacking so far. The paper also explores specific metrics for vertical power, although this is still work in progress. Another important tool that competition authorities may employ in order to map the complex competitive interactions (horizontal and vertical) in the digital economy is the value chain approach. Although competition authorities have already used this tool in sector/industry inquiries, they have not in competition law adjudication. A value chain approach enables competition authorities to better assess the bargaining asymmetries across the various segments of the value chain that may result either from the lack of competition on the markets affected or from the central position of some actors in the specific network and their positioning in the value chain. This tool may complete the market definition tool. The effectiveness of competition law in the digital age may be curtailed by the cross-side network effects linked to positive feedback loops, increasing returns to scope and scale, the intense learning effects linked to AI, and the propensity of digital markets to tip. Hence, competition law on its own may not be sufficient to address the market failures in the digital economy. One therefore needs to take a toolkit approach that would combine different fields of law and regulation, competition law playing a primordial role in this new regulatory compass. This toolkit approach may rely on different combinations in each jurisdiction, on the basis of the institutional capabilities and the relative efficiency of the various regulatory alternatives, any choice being between imperfect, if perceived in isolation, institutional alternatives.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Johnson, Kristin; Pasquale, Frank; Chapman, Jennifer
Artificial Intelligence, Machine Learning, and Bias in Finance: Toward Responsible Innovation Symposium: Rise of the Machines: Artificial Intelligence, Robotics, and the Reprogramming of Law Journal Article
In: Fordham Law Review, vol. 88, no. 2, pp. 499–530, 2019.
@article{johnson_artificial_2019,
title = {Artificial Intelligence, Machine Learning, and Bias in Finance: Toward Responsible Innovation},
author = {Kristin Johnson and Frank Pasquale and Jennifer Chapman},
url = {https://heinonline.org/HOL/P?h=hein.journals/flr88&i=515},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-21},
journal = {Fordham Law Review},
volume = {88},
number = {2},
pages = {499--530},
note = {Symposium: Rise of the Machines: Artificial Intelligence, Robotics, and the Reprogramming of Law},
abstract = {Over the last decade, a growing number of digital startups launched bids to lure business from the financial services industry. Financial technology (“fintech”) firms deploying ever more complex and opaque algorithms assess the creditworthiness of consumers. Armed with vast quantities of data and complex algorithms to interpret the data, these firms are reigniting debates about how best to regulate financial institutions and technology firms engaged in consumer banking activities.
With a few quick taps on a smart phone, consumers can access a growing universe of apps that offer discounted interest rates on consumer loans. For proponents, the launch of fintech firms marks a new frontier in the ever-expanding utopian vision of the “technological sublime” or faith-like devotion to the potential for technology to transform us into a more equitable and just society. Consumer advocates are justifiably skeptical. While legally prohibited today, well-documented discriminatory, exclusionary, and predatory credit market practices persist.
This Essay describes fintech firms’ integration of learning algorithms and their anticipated economic and social welfare benefits — enhanced efficiency, accuracy, and accessibility. We then examine the emerging regulatory landscape. Over the last decade, federal banking regulators signaled and adopted policies that preempted state regulatory authority over fintech firms. A recent announcement by the Office of the Comptroller of the Currency (OCC) revealed the agency’s intention to allow fintech firms to apply for special purpose charters that would permit them to operate, in many respects, as national banks (“Fintech Charter Decision”).
The OCC’s Fintech Charter Decision creates gaps in the supervision of fintech firms and encourages market participants to engage in regulatory arbitrage. We argue that federal special purpose charters set the stage for regulatory arbitrage and may enable fintech firms to minimize their exposure to state antidiscrimination and consumer protection regulations. Reducing regulatory oversight of these important legal and ethical norms in a dynamic and evolving market defined by a technology that may import unconscious biases and disadvantage lower-income individuals and families raises red flags. We conclude with brief reflections regarding the necessity for courts and regulators to balance the promised benefits of fintech firms’ neo-banking initiatives with the historic and special gatekeeping role of banking platforms. Unilateral deregulatory action by state or federal regulators may undermine efforts to ensure effective oversight of fintech firms that seek to extend access to safe and affordable banking services.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
With a few quick taps on a smart phone, consumers can access a growing universe of apps that offer discounted interest rates on consumer loans. For proponents, the launch of fintech firms marks a new frontier in the ever-expanding utopian vision of the “technological sublime” or faith-like devotion to the potential for technology to transform us into a more equitable and just society. Consumer advocates are justifiably skeptical. While legally prohibited today, well-documented discriminatory, exclusionary, and predatory credit market practices persist.
This Essay describes fintech firms’ integration of learning algorithms and their anticipated economic and social welfare benefits — enhanced efficiency, accuracy, and accessibility. We then examine the emerging regulatory landscape. Over the last decade, federal banking regulators signaled and adopted policies that preempted state regulatory authority over fintech firms. A recent announcement by the Office of the Comptroller of the Currency (OCC) revealed the agency’s intention to allow fintech firms to apply for special purpose charters that would permit them to operate, in many respects, as national banks (“Fintech Charter Decision”).
The OCC’s Fintech Charter Decision creates gaps in the supervision of fintech firms and encourages market participants to engage in regulatory arbitrage. We argue that federal special purpose charters set the stage for regulatory arbitrage and may enable fintech firms to minimize their exposure to state antidiscrimination and consumer protection regulations. Reducing regulatory oversight of these important legal and ethical norms in a dynamic and evolving market defined by a technology that may import unconscious biases and disadvantage lower-income individuals and families raises red flags. We conclude with brief reflections regarding the necessity for courts and regulators to balance the promised benefits of fintech firms’ neo-banking initiatives with the historic and special gatekeeping role of banking platforms. Unilateral deregulatory action by state or federal regulators may undermine efforts to ensure effective oversight of fintech firms that seek to extend access to safe and affordable banking services.
Lin, Tom C. W.
Artificial Intelligence, Finance, and the Law Symposium: Rise of the Machines: Artificial Intelligence, Robotics, and the Reprogramming of Law Journal Article
In: Fordham Law Review, vol. 88, no. 2, pp. 531–552, 2019.
@article{lin_artificial_2019,
title = {Artificial Intelligence, Finance, and the Law},
author = {Tom C. W. Lin},
url = {https://heinonline.org/HOL/P?h=hein.journals/flr88&i=547},
year = {2019},
date = {2019-01-01},
urldate = {2024-10-21},
journal = {Fordham Law Review},
volume = {88},
number = {2},
pages = {531--552},
note = {Symposium: Rise of the Machines: Artificial Intelligence, Robotics, and the Reprogramming of Law},
abstract = {Artificial intelligence is an existential component of modern finance. The progress and promise realized and presented by artificial intelligence in finance has been thus far remarkable. It has made finance cheaper, faster, larger, more accessible, more profitable, and more efficient in many ways. Yet for all the significant progress and promise made possible by financial artificial intelligence, it also presents serious risks and limitations. This Article offers a study of those risks and limitations—the ways artificial intelligence and misunderstandings of it can harm and hinder law, finance, and society. It provides a broad examination of inherent and structural risks and limitations present in financial artificial intelligence, explains the implications posed by such dangers, and offers some recommendations for the road ahead. Specifically, it highlights the perils and pitfalls of artificial codes, data bias, virtual threats, and systemic risks relating to financial artificial intelligence. It also raises larger issues about the implications of financial artificial intelligence on financial cybersecurity, competition, and society in the near future. Ultimately, this Article aspires to share an insightful perspective for thinking anew about the wide-ranging effects at the intersection of artificial intelligence, finance, and the law with the hopes of creating better financial artificial intelligence—one that is less artificial, more intelligent, and ultimately more humane, and more human.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}