@inproceedings{12048,
  abstract     = {Interactive stories can be an effective approach for teaching purposes. One shortcoming is the effort necessary to author and create these stories, especially complex storylines with choices for the readers. Based on recent advances in Natural Language Processing (NLP), new opportunities arise for assistance systems in the context of interactive stories. In our work, we present an authoring approach and prototypical tool for the creation of visual comic-strip like interactive stories, a type of hypercomics, that integrate an Artificial Intelligence (AI) assistance. Such comics are already used in our Gekonnt hanDeln web platform. The AI assistance provides suggestions for the overall story outline as well as how to design and write individual story frames. We provide a detailed description about the approach and its prototypical implementation. Furthermore, we present a study evaluating the prototype with student groups and how the prototype evolved in an iterative style based on the students' feedback.},
  author       = {Grimm, Valentin and Rubart, Jessica},
  booktitle    = {HT '24: Proceedings of the 35th {ACM} Conference on Hypertext and Social Media},
  isbn         = {979-8-4007-0595-3},
  keywords     = {Storytelling, Authoring, GPT, Hypercomics, Large Language Models},
  venue        = {Poznan, Poland},
  pages        = {88--97},
  publisher    = {Association for Computing Machinery (ACM)},
  title        = {Authoring Educational {Hypercomics} Assisted by Large Language Models},
  doi          = {10.1145/3648188.3675124},
  year         = {2024},
}

@inproceedings{4094,
  abstract     = {Projection-based assistive systems that guide users through assembly work are on their way to industrial application. Previous research work investigated how people can be supported with such systems. However, there has been little work on the question on how to generate and author sequential instructions for assistive systems. In this paper, we present a new concept and a prototypical implementation of an assistive system that can be taught by demonstrating an assembly process. By using a combination of RGB and depth cameras, we can generate an assembly instruction of Lego Duplo bricks based on the demonstration of a user. This generated manual can later on be used for assisting other users in the assembly process. By our prototype system, we show the technological feasibility of assistive systems that can learn from users.},
  author       = {B{\"u}ttner, Sebastian and Peda, Andreas and Heinz, Mario and R{\"o}cker, Carsten},
  booktitle    = {22nd International Conference on Human-Computer Interaction},
  isbn         = {978-3-030-50343-7},
  keywords     = {Assistive system, Authoring, Instruction generation, Computer vision, Teaching by demonstration},
  venue        = {Copenhagen, Denmark},
  pages        = {153--163},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {Teaching by Demonstrating -- How Smart Assistive Systems Can Learn from Users},
  doi          = {10.1007/978-3-030-50344-4_12},
  volume       = {12203},
  year         = {2020},
}

