Gofman, Michael; Jin, Zhao
Artificial Intelligence, Education, and Entrepreneurship Journal Article
In: The Journal of Finance, vol. 79, no. 1, pp. 631–667, 2024, ISSN: 1540-6261, (_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/jofi.13302).
@article{gofman_artificial_2024,
title = {Artificial Intelligence, Education, and Entrepreneurship},
author = {Michael Gofman and Zhao Jin},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/jofi.13302},
doi = {10.1111/jofi.13302},
issn = {1540-6261},
year = {2024},
date = {2024-01-01},
urldate = {2024-10-21},
journal = {The Journal of Finance},
volume = {79},
number = {1},
pages = {631--667},
abstract = {We document an unprecedented brain drain of Artificial Intelligence (AI) professors from universities from 2004 to 2018. We find that students from the affected universities establish fewer AI startups and raise less funding. The brain-drain effect is significant for tenured professors, professors from top universities, and deep-learning professors. Additional evidence suggests that unobserved city- and university-level shocks are unlikely to drive our results. We consider several economic channels for the findings. The most consistent explanation is that professors' departures reduce startup founders' AI knowledge, which we find is an important factor for successful startup formation and fundraising.},
note = {_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/jofi.13302},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Smuha, Nathalie A.
Pitfalls and pathways for Trustworthy Artificial Intelligence in education Book Section
In: The Ethics of Artificial Intelligence in Education, Routledge, 2022, ISBN: 978-0-429-32906-7, (Num Pages: 33).
@incollection{smuha_pitfalls_2022,
title = {Pitfalls and pathways for {Trustworthy Artificial Intelligence} in education},
author = {Nathalie A. Smuha},
url = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3742421},
isbn = {978-0-429-32906-7},
year = {2022},
date = {2022-01-01},
booktitle = {The Ethics of Artificial Intelligence in Education},
publisher = {Routledge},
abstract = {Artificial Intelligence (AI) applications are entering all domains of our lives, including education. Besides benefits, the use of AI can also entail ethical risks, which are increasingly appearing on legislators’ agendas. Many of these risks are context-specific and increase when vulnerable individuals are involved, asymmetries of power exist, and human rights and democratic values are at stake. Surprisingly, regulators thus far have paid only little attention to the specific risks arising in the context of AI in education (AIED). In this chapter, I assess the ethical challenges posed by AIED, taking as a normative framework the seven requirements for Trustworthy AI set out in the Ethics Guidelines of the European Commission’s High-Level Expert Group on AI. After an overview of the Guidelines’ broader context), I examine each requirement in the educational domain and assess the pitfalls that should be addressed. I pay particular attention to the role of education in shaping people’s minds, and the manner in which this role can be used both to empower and exploit individuals. I note that AIED’s main strength – offering education on a wider scale through more flexible and individualized learning methods – also constitutes a liability when left unchecked. Finally, I discuss various pathways that policymakers should consider to foster Trustworthy AIED beyond the adoption of guidelines, before concluding.},
note = {Num Pages: 33},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Zeide, Elana
Robot Teaching, Pedagogy, and Policy Book Section
In: Dubber, Markus D.; Pasquale, Frank; Das, Sunit (Ed.): The Oxford Handbook of Ethics of AI, Oxford University Press, 2020, ISBN: 978-0-19-006739-7.
@incollection{zeide_robot_2020,
title = {Robot Teaching, Pedagogy, and Policy},
author = {Elana Zeide},
editor = {Markus D. Dubber and Frank Pasquale and Sunit Das},
url = {https://doi.org/10.1093/oxfordhb/9780190067397.013.51},
doi = {10.1093/oxfordhb/9780190067397.013.51},
isbn = {978-0-19-006739-7},
year = {2020},
date = {2020-07-01},
urldate = {2024-10-21},
booktitle = {The Oxford Handbook of Ethics of AI},
publisher = {Oxford University Press},
abstract = {This chapter looks at the use of artificial intelligence (AI) in education, which immediately conjures the fantasy of robot teachers, as well as fears that robot teachers will replace their human counterparts. However, AI tools impact much more than instructional choices. Personalized learning systems take on a whole host of other educational roles as well, fundamentally reconfiguring education in the process. They not only perform the functions of robot teachers but also make pedagogical and policy decisions typically left to teachers and policymakers. Their design, affordances, analytical methods, and visualization dashboards construct a technological, computational, and statistical infrastructure that literally codifies what students learn, how they are assessed, and what standards they must meet. However, school procurement and implementation of these systems are rarely part of public discussion. If they are to remain relevant to the educational process itself, as opposed to just its packaging and context, schools and their stakeholders must be more proactive in demanding information from technology providers and setting internal protocols to ensure effective and consistent implementation. Those who choose to outsource instructional functions should do so with sufficient transparency mechanisms in place to ensure professional oversight guided by well-informed debate.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Berendt, Bettina; Littlejohn, Allison; Blakemore, Mike
AI in education: learner choice and fundamental rights Journal Article
In: Learning, Media and Technology, vol. 45, no. 3, pp. 312–324, 2020, ISSN: 1743-9884, (Publisher: Routledge).
@article{berendt_ai_2020,
title = {{AI} in education: learner choice and fundamental rights},
author = {Bettina Berendt and Allison Littlejohn and Mike Blakemore},
url = {https://www.tandfonline.com/doi/full/10.1080/17439884.2020.1786399},
doi = {10.1080/17439884.2020.1786399},
issn = {1743-9884},
year = {2020},
date = {2020-07-01},
urldate = {2024-10-21},
journal = {Learning, Media and Technology},
volume = {45},
number = {3},
pages = {312--324},
abstract = {This article examines benefits and risks of Artificial Intelligence (AI) in education in relation to fundamental human rights. The article is based on an EU scoping study [Berendt, B., A. Littlejohn, P. Kern, P. Mitros, X. Shacklock, and M. Blakemore. 2017. Big Data for Monitoring Educational Systems. Luxembourg: Publications Office of the European Union. https://publications.europa.eu/en/publication-detail/-/publication/94cb5fc8-473e-11e7-aea8-01aa75ed71a1/]. The study takes into account the potential for AI and ‘Big Data’ to provide more effective monitoring of the education system in real-time, but also considers the implications for fundamental human rights and freedoms of both teachers and learners. The analysis highlights a need to balance the benefits and risks as AI tools are developed, marketed and deployed. We conclude with a call to embed consideration of the benefits and risks of AI in education as technology tools into the development, marketing and deployment of these tools. There are questions around who – which body or organisation – should take responsibility for regulating AI in education, particularly since AI impacts not only data protection and privacy, but on fundamental rights in general. Given AI’s global impact, it should be regulated at a trans-national level, with a global organisation such as the UN taking on this role.},
note = {Publisher: Routledge},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Vincent-Lancrin, Stéphan; van der Vlies, Reyer
Trustworthy artificial intelligence (AI) in education: Promises and challenges Technical Report
OECD Paris, 2020.
@techreport{vincent-lancrin_trustworthy_2020,
title = {Trustworthy artificial intelligence ({AI}) in education: Promises and challenges},
author = {Stéphan Vincent-Lancrin and Reyer van der Vlies},
url = {https://www.oecd-ilibrary.org/education/trustworthy-artificial-intelligence-ai-in-education_a6c90fa9-en},
doi = {10.1787/a6c90fa9-en},
year = {2020},
date = {2020-04-01},
urldate = {2024-10-21},
address = {Paris},
institution = {OECD},
abstract = {This paper was written to support the G20 artificial intelligence (AI) dialogue. With the rise of artificial intelligence (AI), education faces two challenges: reaping the benefits of AI to improve education processes, both in the classroom and at the system level; and preparing students for new skillsets for increasingly automated economies and societies. AI applications are often still nascent, but there are many examples of promising uses that foreshadow how AI might transform education. With regard to the classroom, this paper highlights how AI can accelerate personalised learning, the support of students with special needs. At the system level, promising uses include predictive analysis to reduce dropout, and assessing new skillsets. A new demand for complex skills that are less easy to automate (e.g. higher cognitive skills like creativity and critical thinking) is also the consequence of AI and digitalisation. Reaching the full potential of AI requires that stakeholders trust not only the technology, but also its use by humans. This raises new policy challenges around “trustworthy AI”, encompassing the privacy and security of data, but also possible wrongful uses of data leading to biases against individuals or groups.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Pasquale, Frank
Professional Judgment in an Era of Artificial Intelligence and Machine Learning Journal Article
In: boundary 2, vol. 46, no. 1, pp. 73–101, 2019, ISSN: 0190-3659.
@article{pasquale_professional_2019,
title = {Professional Judgment in an Era of Artificial Intelligence and Machine Learning},
author = {Frank Pasquale},
url = {https://doi.org/10.1215/01903659-7271351},
doi = {10.1215/01903659-7271351},
issn = {0190-3659},
year = {2019},
date = {2019-02-01},
urldate = {2024-10-21},
journal = {boundary 2},
volume = {46},
number = {1},
pages = {73--101},
abstract = {Though artificial intelligence (AI) in healthcare and education now accomplishes diverse tasks, there are two features that tend to unite the information processing behind efforts to substitute it for professionals in these fields: reductionism and functionalism. True believers in substitutive automation tend to model work in human services by reducing the professional role to a set of behaviors initiated by some stimulus, which are intended to accomplish some predetermined goal, or maximize some measure of well-being. However, true professional judgment hinges on a way of knowing the world that is at odds with the epistemology of substitutive automation. Instead of reductionism, an encompassing holism is a hallmark of professional practice—an ability to integrate facts and values, the demands of the particular case and prerogatives of society, and the delicate balance between mission and margin. Any presently plausible vision of substituting AI for education and health-care professionals would necessitate a corrosive reductionism. The only way these sectors can progress is to maintain, at their core, autonomous professionals capable of carefully intermediating between technology and the patients it would help treat, or the students it would help learn.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sangapu, Indira
Artificial Intelligence in Education - From a Teacher and a Student Perspective Miscellaneous
2018.
@misc{sangapu_artificial_2018,
author = {Indira Sangapu},
title = {Artificial Intelligence in Education - From a Teacher and a Student Perspective},
publisher = {Social Science Research Network},
address = {Rochester, NY},
doi = {10.2139/ssrn.3372914},
url = {https://papers.ssrn.com/abstract=3372914},
year = {2018},
date = {2018-12-01},
urldate = {2024-10-21},
abstract = {The application of Artificial Intelligence (AI) is now seen in almost every segment of our life. The current study is to explore the perception of teachers and students, on the usage and effectiveness of AI in the classroom. Online survey with open-ended questions is mailed to different participants of both the segments (teachers and students) and the qualitative data obtained through this is analyzed using MAXQDA 2018.1 version. AI is perceived as a bane and also a boon to the education system and to the human intellect. Optimistic utilization of AI in the classrooms is highly recommended by both teacher and student participants. It is also identified that majority of the teachers are more adaptable to embrace new technological changes than students. Further study on generation and geographic diversity based teacher and student perceptions may support more effective implementation of AI in education.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Popenici, Stefan A. D.; Kerr, Sharon
Exploring the impact of artificial intelligence on teaching and learning in higher education Journal Article
In: Research and Practice in Technology Enhanced Learning, vol. 12, no. 1, pp. 22, 2017, ISSN: 1793-7078.
@article{popenici_exploring_2017,
author = {Stefan A. D. Popenici and Sharon Kerr},
title = {Exploring the impact of artificial intelligence on teaching and learning in higher education},
journal = {Research and Practice in Technology Enhanced Learning},
volume = {12},
number = {1},
pages = {22},
issn = {1793-7078},
doi = {10.1186/s41039-017-0062-8},
url = {https://doi.org/10.1186/s41039-017-0062-8},
year = {2017},
date = {2017-11-01},
urldate = {2024-10-21},
abstract = {This paper explores the phenomena of the emergence of the use of artificial intelligence in teaching and learning in higher education. It investigates educational implications of emerging technologies on the way students learn and how institutions teach and evolve. Recent technological advancements and the increasing speed of adopting new technologies in higher education are explored in order to predict the future nature of higher education in a world where artificial intelligence is part of the fabric of our universities. We pinpoint some challenges for institutions of higher education and student learning in the adoption of these technologies for teaching, learning, student support, and administration and explore further directions for research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zeide, Elana
The Structural Consequences of Big Data-Driven Education Journal Article
In: Big Data, vol. 5, no. 2, pp. 164–172, 2017, ISSN: 2167-647X.
@article{zeide_structural_2017,
title = {The Structural Consequences of Big Data-Driven Education},
author = {Elana Zeide},
url = {https://pubmed.ncbi.nlm.nih.gov/28632444/},
doi = {10.1089/big.2016.0061},
issn = {2167-647X},
year = {2017},
date = {2017-06-01},
journal = {Big Data},
volume = {5},
number = {2},
pages = {164--172},
abstract = {Educators and commenters who evaluate big data-driven learning environments focus on specific questions: whether automated education platforms improve learning outcomes, invade student privacy, and promote equality. This article puts aside separate unresolved-and perhaps unresolvable-issues regarding the concrete effects of specific technologies. It instead examines how big data-driven tools alter the structure of schools' pedagogical decision-making, and, in doing so, change fundamental aspects of America's education enterprise. Technological mediation and data-driven decision-making have a particularly significant impact in learning environments because the education process primarily consists of dynamic information exchange. In this overview, I highlight three significant structural shifts that accompany school reliance on data-driven instructional platforms that perform core school functions: teaching, assessment, and credentialing. First, virtual learning environments create information technology infrastructures featuring constant data collection, continuous algorithmic assessment, and possibly infinite record retention. This undermines the traditional intellectual privacy and safety of classrooms. Second, these systems displace pedagogical decision-making from educators serving public interests to private, often for-profit, technology providers. They constrain teachers' academic autonomy, obscure student evaluation, and reduce parents' and students' ability to participate or challenge education decision-making. Third, big data-driven tools define what "counts" as education by mapping the concepts, creating the content, determining the metrics, and setting desired learning outcomes of instruction. These shifts cede important decision-making to private entities without public scrutiny or pedagogical examination. In contrast to the public and heated debates that accompany textbook choices, schools often adopt education technologies ad hoc. 
Given education's crucial impact on individual and collective success, educators and policymakers must consider the implications of data-driven education proactively and explicitly.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xing, Bo; Marwala, Tshilidzi
Implications of the Fourth Industrial Age on Higher Education Miscellaneous
2017, (arXiv:1703.09643).
@misc{xing_implications_2017,
title = {Implications of the Fourth Industrial Age on Higher Education},
author = {Bo Xing and Tshilidzi Marwala},
url = {http://arxiv.org/abs/1703.09643},
doi = {10.48550/arXiv.1703.09643},
eprint = {1703.09643},
eprinttype = {arXiv},
year = {2017},
date = {2017-03-01},
urldate = {2024-10-21},
publisher = {arXiv},
abstract = {Higher education in the fourth industrial revolution, HE 4.0, is a complex, dialectical and exciting opportunity which can potentially transform society for the better. The fourth industrial revolution is powered by artificial intelligence and it will transform the workplace from tasks based characteristics to the human centred characteristics. Because of the convergence of man and machine, it will reduce the subject distance between humanities and social science as well as science and technology. This will necessarily require much more interdisciplinary teaching, research and innovation. This paper explores the impact of HE 4.0 on the mission of a university which is teaching, research (including innovation) and service.},
note = {arXiv:1703.09643},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}