2018
|
Hair, A; Monroe, P; Ahmed, B; Ballard, K J; Gutierrez-Osuna, R Apraxia World: A Speech Therapy Game for Children with Speech Sound Disorders Proceedings Article In: Proceedings of the 2018 Conference on Interaction Design and Children, ACM, 2018, ISBN: 978-1-4503-5152-2/18/06.
@inproceedings{hair2018idc,
  title     = {{Apraxia World}: A Speech Therapy Game for Children with Speech Sound Disorders},
  author    = {Hair, A and Monroe, P and Ahmed, B and Ballard, K J and Gutierrez-Osuna, R},
  url       = {https://psi.engr.tamu.edu/wp-content/uploads/2018/04/hair2018idc.pdf},
  doi       = {10.1145/3202185.3202733},
  isbn      = {978-1-4503-5152-2},
  year      = {2018},
  date      = {2018-06-19},
  booktitle = {Proceedings of the 2018 Conference on Interaction Design and Children},
  publisher = {ACM},
  abstract  = {This paper presents Apraxia World, a remote therapy tool for speech sound disorders that integrates speech exercises into an engaging platformer-style game. In Apraxia World, the player controls the avatar with virtual buttons/joystick, whereas speech input is associated with assets needed to advance from one level to the next. We tested performance and child preference of two strategies for delivering speech exercises: during each level, and after it. Most children indicated that doing exercises after completing each level was less disruptive and preferable to doing exercises scattered through the level. We also found that children liked having perceived control over the game (character appearance, exercise behavior). Our results indicate that (i) a familiar style of game successfully engages children, (ii) speech exercises function well when decoupled from game control, and (iii) children are willing to complete required speech exercises while playing a game they enjoy.},
  keywords  = {Childhood apraxia of speech, Health, Mobile computing, Speech},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
This paper presents Apraxia World, a remote therapy tool for speech sound disorders that integrates speech exercises into an engaging platformer-style game. In Apraxia World, the player controls the avatar with virtual buttons/joystick, whereas speech input is associated with assets needed to advance from one level to the next. We tested performance and child preference of two strategies for delivering speech exercises: during each level, and after it. Most children indicated that doing exercises after completing each level was less disruptive and preferable to doing exercises scattered through the level. We also found that children liked having perceived control over the game (character appearance, exercise behavior). Our results indicate that (i) a familiar style of game successfully engages children, (ii) speech exercises function well when decoupled from game control, and (iii) children are willing to complete required speech exercises while playing a game they enjoy. |
2016
|
McKechnie, J; Ballard, K J; McCabe, P; Murray, E; Lan, T; Gutierrez-Osuna, R; Ahmed, B Influence of type of feedback on effect of tablet-based delivery of intensive speech therapy in children with Childhood Apraxia of Speech Proceedings Article In: Proceedings of the Motor Speech Conference, 2016.
@inproceedings{mckechnie-2016-motorspeech,
  title     = {Influence of Type of Feedback on Effect of Tablet-Based Delivery of Intensive Speech Therapy in Children with {Childhood Apraxia of Speech}},
  author    = {McKechnie, J and Ballard, K J and McCabe, P and Murray, E and Lan, T and Gutierrez-Osuna, R and Ahmed, B},
  year      = {2016},
  date      = {2016-03-03},
  booktitle = {Proceedings of the Motor Speech Conference},
  keywords  = {Childhood apraxia of speech, Games, Health, Mobile computing, Speech},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|
2015
|
Parnandi, A; Karappa, V; Lan, T; Shahin, M; McKechnie, J; Ballard, K; Ahmed, B; Gutierrez-Osuna, R Development of a remote therapy tool for childhood apraxia of speech Journal Article In: ACM Transactions on Accessible Computing, vol. 7, no. 3, pp. 10:1-10:23, 2015.
@article{parnandi2015taccess,
  title     = {Development of a Remote Therapy Tool for Childhood Apraxia of Speech},
  author    = {Parnandi, A and Karappa, V and Lan, T and Shahin, M and McKechnie, J and Ballard, K and Ahmed, B and Gutierrez-Osuna, R},
  url       = {https://psi.engr.tamu.edu/wp-content/uploads/2018/01/parnandi2015taccess.pdf},
  year      = {2015},
  date      = {2015-11-01},
  journal   = {ACM Transactions on Accessible Computing},
  volume    = {7},
  number    = {3},
  pages     = {10:1--10:23},
  keywords  = {Childhood apraxia of speech, Games, Health, Mobile computing, Speech},
  pubstate  = {published},
  tppubtype = {article}
}
|
Shahin, M; Ahmed, B; Parnandi, A; Karappa, V; McKechnie, J; Ballard, K; Gutierrez-Osuna, R Tabby Talks: an automated tool for the assessment of childhood apraxia of speech Journal Article In: Speech Communication, vol. in press, 2015. @article{shahin2015specom,
  title     = {{Tabby Talks}: An Automated Tool for the Assessment of Childhood Apraxia of Speech},
  author    = {Shahin, M and Ahmed, B and Parnandi, A and Karappa, V and McKechnie, J and Ballard, K and Gutierrez-Osuna, R},
  url       = {https://psi.engr.tamu.edu/wp-content/uploads/2018/01/shahin2015specom.pdf},
  year      = {2015},
  date      = {2015-04-02},
  urldate   = {2015-04-02},
  journal   = {Speech Communication},
  note      = {In press at time of entry; update volume/pages once assigned},
  keywords  = {Childhood apraxia of speech, Games, Health, Mobile computing, Speech},
  pubstate  = {published},
  tppubtype = {article}
}
|
2013
|
Parnandi, A; Karappa, V; Son, Y; Shahin, M; McKechnie, J; Ballard, K; Ahmed, B; Gutierrez-Osuna, R Architecture of an automated therapy tool for childhood apraxia of speech Conference The 15th International ACM SIGACCESS Conference on Computers and Accessibility (ASSETS), 2013.
@inproceedings{avinashassets2013,
  title     = {Architecture of an Automated Therapy Tool for Childhood Apraxia of Speech},
  author    = {Parnandi, A and Karappa, V and Son, Y and Shahin, M and McKechnie, J and Ballard, K and Ahmed, B and Gutierrez-Osuna, R},
  url       = {https://psi.engr.tamu.edu/wp-content/uploads/2018/01/avinashassets2013.pdf},
  year      = {2013},
  date      = {2013-10-21},
  urldate   = {2013-10-21},
  booktitle = {The 15th International ACM SIGACCESS Conference on Computers and Accessibility (ASSETS)},
  keywords  = {Childhood apraxia of speech, Games, Health, Mobile computing, Speech},
  pubstate  = {published},
  tppubtype = {conference}
}
|
2012
|
Parnandi, A; Son, Y; Shahin, M; Ahmed, B; Gutierrez-Osuna, R Architecture of an Automated Therapy Tool for Childhood Apraxia of Speech Technical Report 2012.
@techreport{parnandi2012techreport-2,
  title         = {Architecture of an Automated Therapy Tool for Childhood Apraxia of Speech},
  author        = {Parnandi, A and Son, Y and Shahin, M and Ahmed, B and Gutierrez-Osuna, R},
  url           = {https://psi.engr.tamu.edu/wp-content/uploads/2018/01/parnandi2012techreport-2.pdf},
  institution   = {Texas A\&M University},
  internal-note = {institution inferred from the PSI lab URL (psi.engr.tamu.edu) -- verify against the report cover page},
  year          = {2012},
  date          = {2012-08-21},
  urldate       = {2012-08-21},
  abstract      = {We present a multi-tier architecture for automating the administration of speech therapy to children suffering from apraxia of speech. This architecture follows a client-server model and facilitates task-oriented remote therapeutic training in home settings. The therapy regimen is remotely assigned to the child by a speech therapist based on a standardized protocol. We utilize tablet PCs to provide stimuli to the children and record their speech response. The speech data is then streamed to a back-end server running a specialized speech-processing module to identify errors and quantify the progress of the child. These automated results allow the therapist to closely monitor the performance of each child, provide relevant feedback, and adapt the training program as needed. Our proposed architecture can accommodate a variety of interaction modalities that can serve as a complement to traditional face-to-face speech practice. In this paper we describe the client-server architecture, the middleware tools upon which the system has been built, and the speech-processing tools for automatically scoring the patients’ speech.},
  keywords      = {Games, Health, Mobile computing, Speech},
  pubstate      = {published},
  tppubtype     = {techreport}
}
We present a multi-tier architecture for automating the administration of speech therapy to children suffering from apraxia of speech. This architecture follows a client-server model and facilitates task-oriented remote therapeutic training in home settings. The therapy regimen is remotely assigned to the child by a speech therapist based on a standardized protocol. We utilize tablet PCs to provide stimuli to the children and record their speech response. The speech data is then streamed to a back-end server running a specialized speech-processing module to identify errors and quantify the progress of the child. These automated results allow the therapist to closely monitor the performance of each child, provide relevant feedback, and adapt the training program as needed. Our proposed architecture can accommodate a variety of interaction modalities that can serve as a complement to traditional face-to-face speech practice. In this paper we describe the client-server architecture, the middleware tools upon which the system has been built, and the speech-processing tools for automatically scoring the patients’ speech. |
Lucchese, G; Field, M; Ho, J; Gutierrez-Osuna, R; Hammond, T GestureCommander: continuous touch-based gesture prediction Conference Proceedings of the 2012 ACM annual conference extended abstracts on Human Factors in Computing Systems Extended Abstracts, ACM 2012.
@inproceedings{lucchese2012chi,
  title     = {{GestureCommander}: Continuous Touch-Based Gesture Prediction},
  author    = {Lucchese, G and Field, M and Ho, J and Gutierrez-Osuna, R and Hammond, T},
  url       = {https://psi.engr.tamu.edu/wp-content/uploads/2018/01/lucchese2012chi.pdf},
  year      = {2012},
  date      = {2012-01-01},
  booktitle = {Proceedings of the 2012 ACM annual conference extended abstracts on Human Factors in Computing Systems Extended Abstracts},
  pages     = {1925--1930},
  publisher = {ACM},
  abstract  = {GestureCommander is a touch-based gesture control system for mobile devices that is able to recognize gestures as they are being performed. Continuous recognition allows the system to provide visual feedback to the user and to anticipate user commands to possibly decrease perceived response time. To achieve this goal we employ two Hidden Markov Model (HMM) systems, one for recognition and another for generating visual feedback. We analyze a set of geometric features used in other gesture recognition systems and determine a subset that works best for HMMs. Finally we demonstrate the practicality of our recognition HMMs in a proof of concept mobile application for Google's Android mobile platform that has a recognition accuracy rate of 96% over 15 distinct gestures.},
  keywords  = {Gestures, Mobile computing},
  pubstate  = {published},
  tppubtype = {conference}
}
GestureCommander is a touch-based gesture control system for mobile devices that is able to recognize gestures as they are being performed. Continuous recognition allows the system to provide visual feedback to the user and to anticipate user commands to possibly decrease perceived response time. To achieve this goal we employ two Hidden Markov Model (HMM) systems, one for recognition and another for generating visual feedback. We analyze a set of geometric features used in other gesture recognition systems and determine a subset that works best for HMMs. Finally we demonstrate the practicality of our recognition HMMs in a proof of concept mobile application for Google's Android mobile platform that has a recognition accuracy rate of 96% over 15 distinct gestures. |