@comment{Conference paper (ACM GoodIT 2025). Was exported as @misc with every
  field double-braced; double braces around author/editor make BibTeX parse the
  whole string as a single corporate surname. Fixed to @inproceedings with
  single-braced fields. Citation key kept as-is so existing \cite{13529} works.}
@inproceedings{13529,
  author       = {Franco, Mirko and Grimm, Valentin and Herder, Eelco},
  title        = {Preventing Accidental Sharing of Misinformation Using Large Language Models},
  booktitle    = {Proceedings of the 2025 International Conference on Information Technology for Social Good},
  editor       = {Marquez-Barja, Johann and Bujari, Armir and Slamnik-Kriještorac, Nina and Sabbioni, Andrea},
  location     = {Antwerp, Belgium},
  pages        = {244--252},
  publisher    = {ACM},
  year         = {2025},
  isbn         = {979-8-4007-2089-5},
  doi          = {10.1145/3748699.3749798},
  keywords     = {misinformation, fake news, large language models, online social networks},
  abstract     = {The proliferation of misinformation is one of the most pressing challenges in today’s digital landscape, due to its far-reaching implications for public health, economic stability, trust in governmental institutions, and societal cohesion. Despite efforts to regulate online platforms and limit the spread of misinformation, many individuals are left behind because of their low digital literacy, level of education, and other contributing factors. In this context, we explore the use of Large Language Models (LLMs) to identify misinformation and we evaluate the capabilities of GPT-4.1-mini, as a representative example of these models. We then discuss how LLMs can help empower users to critically create and share information, thereby fostering more resilient online communities. We also present a set of possible interaction patterns for content creation and moderation.},
}

