Abstract
In co-located situations, team members use a combination of verbal and visual signals to communicate effectively, among which positional forms play a key role. The spatial patterns adopted by team members in terms of where in the physical space they are standing, and who their body is oriented to, can be key in analysing and increasing the quality of interaction during such face-to-face situations. In this paper, we model the students’ communication based on spatial (positioning) and audio (voice detection) data captured from 92 students working in teams of four in the context of healthcare simulation. We extract non-verbal events (i.e., total speaking time, overlapped speech, and speech responses to team members and teachers) and investigate to what extent they can serve as meaningful indicators of students’ performance according to teachers’ learning intentions. The contribution of this paper to multimodal learning analytics includes: i) a generic method to semi-automatically model communication in a setting where students can freely move in the learning space; and ii) results from a mixed-methods analysis of non-verbal indicators of team communication with respect to teachers’ learning design.
Links
BibTeX (Download)
@inproceedings{10.1145/3506860.3506935,
  title     = {Modelling Co-Located Team Communication from Voice Detection and Positioning Data in Healthcare Simulation},
  author    = {Zhao, Linxuan and Yan, Lixiang and Gasevic, Dragan and Dix, Samantha and Jaggard, Hollie and Wotherspoon, Rosie and Alfredo, Riordan and Li, Xinyu and Martinez-Maldonado, Roberto},
  booktitle = {LAK22: 12th International Learning Analytics and Knowledge Conference},
  series    = {LAK22},
  pages     = {370--380},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Online, USA},
  year      = {2022},
  isbn      = {9781450395731},
  doi       = {10.1145/3506860.3506935},
  abstract  = {In co-located situations, team members use a combination of verbal and visual signals to communicate effectively, among which positional forms play a key role. The spatial patterns adopted by team members in terms of where in the physical space they are standing, and who their body is oriented to, can be key in analysing and increasing the quality of interaction during such face-to-face situations. In this paper, we model the students’ communication based on spatial (positioning) and audio (voice detection) data captured from 92 students working in teams of four in the context of healthcare simulation. We extract non-verbal events (i.e., total speaking time, overlapped speech, and speech responses to team members and teachers) and investigate to what extent they can serve as meaningful indicators of students’ performance according to teachers’ learning intentions. The contribution of this paper to multimodal learning analytics includes: i) a generic method to semi-automatically model communication in a setting where students can freely move in the learning space; and ii) results from a mixed-methods analysis of non-verbal indicators of team communication with respect to teachers’ learning design.},
  keywords  = {audio, collaborative learning, communication, healthcare education, learning analytics, multimodal learning analytics},
  pubstate  = {published},
  tppubtype = {inproceedings},
}