@Article{info:doi/10.2196/63854, author="Hess, W. Courtney and Rosenbloom, N. Brittany and Mesaroli, Giulia and Lopez, Cristal and Ngo, Nhat and Cohen, Estreya and Ouellette, Carley and Gold, I. Jeffrey and Logan, Deirdre and Simons, E. Laura and Stinson, N. Jennifer", title="Extended Reality (XR) in Pediatric Acute and Chronic Pain: Systematic Review and Evidence Gap Map", journal="JMIR Pediatr Parent", year="2025", month="Apr", day="7", volume="8", pages="e63854", keywords="virtual reality", keywords="augmented reality", keywords="extended reality", keywords="acute pain", keywords="chronic pain", keywords="pediatrics", keywords="adolescents", keywords="safety", keywords="feasibility", keywords="effectiveness", keywords="evidence gap map", keywords="child", keywords="children", keywords="VR", keywords="XR", keywords="biobehavioral", keywords="intervention", keywords="systematic review", abstract="Background: The use of extended reality (XR), including virtual reality (VR) and augmented reality (AR), for treating pain has accelerated in the last 10 years. XR is an attractive biobehavioral intervention that may support management of pain or pain-related disability. Reviews of the literature pertaining to adults report promising results, particularly for acute procedural pain. Objective: This study aimed to (1) summarize the available evidence with respect to feasibility, safety, and effectiveness (pain intensity) of XR for pediatric acute and chronic pain; (2) summarize assessment tools used to measure study outcomes; and (3) identify gaps in evidence to guide future research efforts. Methods: This study is a systematic review of the literature. Multiple databases (CINAHL, Cochrane Central, Embase, MEDLINE, PsycINFO) were searched from inception until March 2023. Titles, abstracts, and full-text articles were reviewed by 2 team members to determine eligibility. Articles were included if the (1) participants were aged 0 to 18 years; (2) study intervention was VR or AR; (3) study outcomes included safety, feasibility, acceptability, or effectiveness on the outcome of pain; and (4) study design was observational or interventional. Data were collected on bibliographic information; study characteristics; XR characteristics; outcome domains; outcome measures; and study findings pertaining to safety, feasibility, and effectiveness. Results: We included 90 articles in the review. All included studies used VR, and 93\% (84/90) studied VR in the context of acute pain. Of the 90 studies, 74 studies were randomized trials, and 15 studies were observational. Safety was assessed in 23 studies of acute pain, with 13 studies reporting no adverse events and 10 studies reporting events of low concern. Feasibility was assessed in 27 studies. Of the 84 studies of acute pain, 62\% (52/84) reported a positive effect on pain intensity, 21\% (18/84) reported no effect, and 13\% (11/84) reported mixed effects. All 6 studies of chronic pain reported a positive effect on pain intensity. An evidence gap map was used to illuminate gaps in specific research areas stratified by subtypes of pain. Risk of bias assessment revealed 67 studies had a moderate risk of bias, 17 studies had a high risk, and 5 studies were deemed to be low risk. Conclusions: The current body of literature around XR for pediatric pain is focused on acute pain with promising results of safety and effectiveness on pain intensity. The literature pertaining to chronic pain lags behind, limiting our ability to draw conclusions. 
The risk of bias in studies is problematic in this field, with the inherent challenge of blinding participants and researchers to the intervention. Future research should aim to measure effectiveness beyond pain intensity with a consistent approach to measuring key outcome domains and measures. Current efforts are underway to establish expert consensus on best research practices in this field. Trial Registration: Prospero CRD42022307153; https://www.crd.york.ac.uk/PROSPERO/view/CRD42022307153 ", doi="10.2196/63854", url="https://pediatrics.jmir.org/2025/1/e63854" } @Article{info:doi/10.2196/54312, author="Mart{\'i}n-Valero, Roc{\'i}o and Vega-Morales Sr, Alejandro and Mart{\'i}n-Vega, Javier Francisco and Rodriguez-Huguet, Manuel and Rodr{\'i}guez-Mart{\'i}nez, Carmen Maria and Vinolo-Gil, Jesus Maria", title="Effectiveness of Augmented Reality in the Teaching of Health University Students: Quasi-Experimental Study", journal="JMIR Serious Games", year="2025", month="Mar", day="27", volume="13", pages="e54312", keywords="augmented reality", keywords="qualifications", keywords="usability", keywords="university", keywords="teaching", keywords="education", keywords="implementation", keywords="academic performance", keywords="quasi-experimental design", keywords="control group", keywords="applications", keywords="experimental group", abstract="Background: The exponential growth of new technologies has resulted in the need for updating the field of education. From the educational point of view, there are some studies that have promoted the implementation of new technologies. These facts have raised the need to implement augmented reality in the university environment, especially among students of health sciences. The use of augmented reality can mean a new approach to teaching by teachers and better learning by students. Objective: We aimed to analyze the degree of usability of two augmented reality applications and to compare the academic performance between the control group and the experimental group at the Universities of C{\'a}diz and M{\'a}laga. The students at the University of M{\'a}laga used the Zapworks augmented reality software, while those at the University of C{\'a}diz used the Aumentaty augmented reality software for their respective experimental groups. The secondary objective was to measure the relationships between all the studied variables. Methods: This was a quasi-experimental design with a posttest as the only evaluation measure. We followed the SPIRIT (Standard Protocol Items: Recommendations for Interventional Trials) statement and the ethical and legal aspects of the Principles of the Declaration of Helsinki. An intervention was carried out using two augmented reality applications on the subject of General Procedures in Physiotherapy II at the Universities of M{\'a}laga and C{\'a}diz. Results: A total of 199 participants took part in the study. Demographic variables, ratings, and usability were assessed, followed by statistical analysis, with the results and their interpretation being described in the study. Significant differences (P<.001) were found in the ratings at both the universities. In addition, significant differences (P<.001) were found between the experimental group and the control group. Regarding the degree of usability in the univariate analysis, no significant differences were found (P=.049). A multiple regression analysis of the rating and usability was performed. 
The rating showed significant differences, with a beta of 1.4 (P<.001), and usability was also significant (P=.03) in favor of the Aumentaty group. Conclusions: Significant differences were observed in those who used augmented reality compared to the control group, with higher values observed in the University of C{\'a}diz. There are no correlations between the variables of usability and qualifications. Trial Registration: ClinicalTrials.gov NCT05798468; https://clinicaltrials.gov/study/NCT05798468 ", doi="10.2196/54312", url="https://games.jmir.org/2025/1/e54312" } @Article{info:doi/10.2196/62785, author="Fleet, Andrew and Kaustov, Lilia and Belfiore, BR Elio and Kapralos, Bill and Matava, Clyde and Wiegelmann, Julian and Giacobbe, Peter and Alam, Fahad", title="Current Clinical and Educational Uses of Immersive Reality in Anesthesia: Narrative Review", journal="J Med Internet Res", year="2025", month="Mar", day="11", volume="27", pages="e62785", keywords="virtual reality", keywords="augmented reality", keywords="mixed reality", keywords="anesthesia", keywords="immersive reality", keywords="medical education", keywords="artificial intelligence", abstract="Background: The concept of immersive reality (IR), an umbrella term that encompasses virtual reality, augmented reality, and mixed reality, has been established within the health care realm as a potentially valuable tool with numerous applications in both medical education and patient care. Objective: This review aimed to introduce anesthesiologists to the emerging and rapidly evolving literature on IR, its use in anesthesia education, and its transferability into the clinical context. Methods: A review of the relevant literature was conducted using the PubMed database from inception to July 5, 2023. Additional references were identified from the reference lists of selected papers. Results: A total of 51 papers related to the use of IR in anesthesia medical education (including both technical and nontechnical skills) and 63 papers related to applications in clinical practice (eg, preprocedure planning, patient education, and pain management) were included. We present evidence supporting the use of IR in the training and clinical practice of modern anesthesiologists. Conclusions: IR is useful for a variety of applications in anesthesia medical education and has potential advantages over existing simulation approaches. Similarly, IR has demonstrated potential improvements in patient care across several clinical contexts relevant to practicing anesthesiologists. However, many applications remain in the early stages of development, and robust trials are urgently needed to confirm clinical or educational effectiveness and to assess mechanisms, educational validity, and cost-effectiveness. ", doi="10.2196/62785", url="https://www.jmir.org/2025/1/e62785" } @Article{info:doi/10.2196/66222, author="Harari, E. Rayan and Schulwolf, L. Sara and Borges, Paulo and Salmani, Hamid and Hosseini, Farhang and Bailey, T. Shannon K. and Quach, Brian and Nohelty, Eric and Park, Sandra and Verma, Yash and Goralnick, Eric and Goldberg, A. Scott and Shokoohi, Hamid and Dias, D. 
Roger and Eyre, Andrew", title="Applications of Augmented Reality for Prehospital Emergency Care: Systematic Review of Randomized Controlled Trials", journal="JMIR XR Spatial Comput", year="2025", month="Feb", day="11", volume="2", pages="e66222", keywords="prehospital emergency care", keywords="emergency medical services", keywords="randomized controlled trials", keywords="clinical decision support", keywords="training", keywords="augmented reality", keywords="emergency", keywords="care", keywords="systematic review", keywords="BLS", keywords="procedures", keywords="traumatic injury", keywords="survival", keywords="prehospital", keywords="emergency care", keywords="AR", keywords="decision-making", keywords="educational", keywords="education", keywords="EMS", keywords="database", keywords="technology", keywords="critical care", keywords="basic life support", abstract="Background: Delivering high-quality prehospital emergency care remains challenging, especially in resource-limited settings where real-time clinical decision support is limited. Augmented reality (AR) has emerged as a promising health care technology, offering potential solutions to enhance decision-making, care processes, and emergency medical service (EMS) training. Objective: This systematic review assesses the effectiveness of AR in improving clinical decision-making, care delivery, and educational outcomes for EMS providers. Methods: We searched databases including PubMed, Cochrane CENTRAL, Web of Science, Institute of Electrical and Electronics Engineers (IEEE), Embase, PsycInfo, and Association for Computing Machinery (ACM). Studies were selected based on their focus on AR in prehospital care. A total of 14 randomized controlled trials were selected from an initial screening of 2081 manuscripts. Included studies focused on AR use by EMS personnel, examining clinical and educational impacts. Data such as study demographics, intervention type, outcomes, and methodologies were extracted using a standardized form. Primary outcomes assessed included clinical task accuracy, response times, and training efficacy. A narrative synthesis was conducted, and bias was evaluated using Cochrane's risk of bias tool. Improvements in AR-assisted interventions and their limitations were analyzed. Results: AR significantly improved clinical decision-making accuracy and EMS training outcomes, reducing response times in simulations and real-world applications. However, small sample sizes and challenges in integrating AR into workflows limit the generalizability of the findings. Conclusions: AR holds promise for transforming prehospital care by enhancing real-time decision-making and EMS training. Future research should address technological integration and scalability to fully realize AR's potential in EMS. ", doi="10.2196/66222", url="https://xr.jmir.org/2025/1/e66222" } @Article{info:doi/10.2196/49610, author="Zuidhof, Niek and Peters, Oscar and Verbeek, Peter-Paul and Ben Allouch, Somaya", title="Social Acceptance of Smart Glasses in Health Care: Model Evaluation Study of Anticipated Adoption and Social Interaction", journal="JMIR Form Res", year="2025", month="Feb", day="11", volume="9", pages="e49610", keywords="smart glasses", keywords="technology adoption", keywords="social interaction", keywords="instrument development", keywords="structural equation modeling", abstract="Background: Despite the growing interest in smart glasses, it is striking that they are not widespread among health care professionals. 
Previous research has identified issues related to social interactions involving the use of smart glasses in public settings, which may differ from those associated with their application in health care contexts. Objective: Assuming that smart glasses mediate contact between the health care provider and patient, the objectives of this research are two-fold: (1) to develop an instrument that combines the adoption and mediation perspectives, and (2) to gain insights into how the intention to use is influenced through aspects of adoption and social interaction. Methods: A questionnaire was administered to a target audience of health care professionals (N=450), with recruitment via MTurk. The sample primarily included male participants from the United States, with the majority aged 42 years or younger. Although a large portion of respondents were medical doctors, the sample also included nurses and other health care professionals. Data were analyzed by structural equation modeling. Results: Regarding the aim of developing an instrument combining adoption and social interaction, the internal consistency was above the aspirational level ($\alpha$>.70) for the instrument. Furthermore, regarding the second objective involving gaining insights into the influential constructs of the anticipated intention to use, the following results were highlighted: in testing the conceptual model, the measurement model generated a good fit and the respecified structural model also generated a good fit. The tested hypotheses confirmed that social interaction constructs could explain a higher variance of users' anticipated intention to use. Perceived social isolation and decreased attentional allocation did not have a significant effect on attitude. Furthermore, the intention to use smart glasses despite nonacceptance of smart glasses by the patient significantly influenced the anticipated intention to use. In summary, constructs that focus on social interaction could contribute to better explanation and prediction of the expected adoption of smart glasses in health care. Conclusions: The empirical findings of this study provide new insights into how the mediation perspective can increase the explained variance compared to existing knowledge about adoption. Against expectations based on previous literature and despite the social issues raised earlier, these social aspects do play important roles for health care professionals but are ultimately not decisive for the intention to use. As a result, there are fewer threats to the adoption of smart glasses from the perspective of health care professionals than might be expected based on the previous literature. Therefore, the use of smart glasses can still be considered as an innovative way of working in health care. 
", doi="10.2196/49610", url="https://formative.jmir.org/2025/1/e49610" } @Article{info:doi/10.2196/57443, author="Wu, Richard and Chakka, Keerthana and Belko, Sara and Khargonkar, Ninad and Desai, Kevin and Prabhakaran, Balakrishnan and Annaswamy, Thiru", title="Comparing In-Person, Standard Telehealth, and Remote Musculoskeletal Examination With a Novel Augmented Reality Exercise Game System: Pilot Cross-Sectional Comparison Study", journal="JMIR Serious Games", year="2025", month="Feb", day="5", volume="13", pages="e57443", keywords="physical examination", keywords="telemedicine", keywords="tele-health", keywords="virtual care", keywords="virtual health", keywords="telerehabilitation", keywords="augmented reality", keywords="AR", keywords="game", keywords="simulation", keywords="digital world", keywords="virtual environment", keywords="motion", keywords="strength", keywords="force", keywords="musculoskeletal", keywords="remote examination", keywords="exercise", keywords="physical examinations", keywords="telehealth", keywords="cross-sectional", keywords="VIRTEPEX", keywords="patient", keywords="exergame", abstract="Background: Current telemedicine technologies are not fully optimized for conducting physical examinations. The Virtual Remote Tele-Physical Examination (VIRTEPEX) system, a novel proprietary technology platform using a Microsoft Kinect-based augmented reality game system to track motion and estimate force, has the potential to assist with conducting asynchronous, remote musculoskeletal examinations. Objective: This pilot study evaluated the feasibility of the VIRTEPEX system as a supplement to telehealth musculoskeletal strength assessments. Methods: In this cross-sectional pilot study, 12 study participants with upper extremity pain and/or weakness underwent strength evaluations for four upper extremity movements using in-person, telehealth, VIRTEPEX, and composite (telehealth plus VIRTEPEX) assessments. The evaluators were blinded to each other's assessments. The primary outcome was feasibility, as determined by participant recruitment, study completion, and safety. The secondary outcome was preliminary evaluation of inter-rater agreement between in-person, telehealth, and VIRTEPEX strength assessments, including $\kappa$ statistics. Results: This pilot study had an 80\% recruitment rate, a 100\% completion rate, and reported no adverse events. In-person and telehealth evaluations achieved highest overall agreement (85.71\%), followed by agreements between in-person and composite (75\%), in-person and VIRTEPEX (62.5\%), and telehealth and VIRTEPEX (62.5\%) evaluations. However, for shoulder flexion, agreement between in-person and VIRTEPEX evaluations (78.57\%; $\kappa$=0.571, 95\% CI 0.183 to 0.960) and in-person and composite evaluations (78.57\%; $\kappa$=0.571, 95\% CI 0.183 to 0.960) was higher than that between in-person and telehealth evaluations (71.43\%; $\kappa$=0.429, 95\% CI ?0.025 to 0.882). Conclusions: This study demonstrates the feasibility of asynchronous VIRTEPEX examinations and supports the potential for VIRTEPEX to supplement and add value to standard telehealth platforms. Further studies with an additional development of VIRTEPEX and larger sample sizes for adequate power are warranted. ", doi="10.2196/57443", url="https://games.jmir.org/2025/1/e57443" } @Article{info:doi/10.2196/63939, author="Hamza, Hawa and Aboumarzouk, M. Omar and Al-Ansari, Abdulla and Navkar, V. 
Nikhil", title="Dynamic Augmented Reality Cues for Telementoring in Minimally Invasive Surgeries: Scoping Review", journal="J Med Internet Res", year="2025", month="Feb", day="3", volume="27", pages="e63939", keywords="minimally invasive surgery", keywords="surgeons", keywords="laparoscopic", keywords="telementoring", keywords="teleproctoring", keywords="telemedicine", keywords="augmented reality", keywords="dynamic visual cue", keywords="technologies", keywords="robotics", keywords="patient outcomes", keywords="communication", keywords="scoping review", keywords="PRISMA", abstract="Background: Remote surgeons use telementoring technologies to provide real-time guidance during minimally invasive surgeries (MIS). Such technologies are continuously improving with the integration of dynamic augmented reality (AR) cues. This includes virtual overlays of hand gestures, pointers, and surgical tools onto the operating surgeon's view. The operating surgeon comprehends this augmented information to operate on the patient. Thus, understanding these dynamic AR cues (either during surgical training or live surgery) is crucial. Objective: In this paper, we aimed to review the existing telementoring technologies that use dynamic AR cues during MIS. This review describes the MIS (including surgery type, specialty, procedure, and clinical trial), the telementoring system, the dynamic AR cues generated by these systems, and evaluation of the technology in terms of technical aspects, user perceptions, skills gained, and patient outcomes. Methods: A scoping review was conducted using PubMed, Web of Science, Scopus, IEEE Xplore, and ACM Digital Library databases. The search terms included ``telementoring,'' ``minimally invasive surgery,'' and ``augmented reality'' without restrictions imposed on the publication year. Articles covering telementoring using dynamic AR cues during MIS, including laparoscopic and robot-assisted, were identified. Results: A total of 21 articles were included and categorized based on type of surgery, the telementoring technology used, and evaluation of the technology. Most of the articles reported on laparoscopic suturing performed using synthetic phantoms. Hand gestures and surgical tools were the most frequently used dynamic AR cues (10 articles on each cue), while the mentors and mentees primarily consisted of experienced surgeons and medical students. The studies assessing the telementoring technologies were either descriptive (7 articles) or analytical (14 articles) where it was compared against no cue, prerecorded visual cue, in-person guidance, audio cue, or static AR cue. Outcomes were measured mostly using skills gained (13 articles) and user perception about the telementoring system. Conclusions: In general, telementoring using dynamic AR cues resulted in positive outcomes during MIS. In some cases, they were considered on par with conventional methods such as audio cues and in-person guidance. Further randomized controlled trials are required to objectively assess its clinical benefits. ", doi="10.2196/63939", url="https://www.jmir.org/2025/1/e63939", url="http://www.ncbi.nlm.nih.gov/pubmed/39899360" } @Article{info:doi/10.2196/58108, author="Celdr{\'a}n, Javier Francisco and Jim{\'e}nez-Ruescas, Javier and Lobato, Carlos and Salazar, Luc{\'i}a and S{\'a}nchez-Margallo, Alberto Juan and S{\'a}nchez-Margallo, M. 
Francisco and Gonz{\'a}lez, Pascual", title="Use of Augmented Reality for Training Assistance in Laparoscopic Surgery: Scoping Literature Review", journal="J Med Internet Res", year="2025", month="Jan", day="28", volume="27", pages="e58108", keywords="laparoscopic surgery", keywords="surgical training", keywords="surgical simulator", keywords="augmented reality--based laparoscopic simulator", keywords="AR-based laparoscopic simulator", keywords="augmented reality", keywords="mobile phone", abstract="Background: Laparoscopic surgery training is a demanding process requiring technical and nontechnical skills. Surgical training has evolved from traditional approaches to the use of immersive digital technologies such as virtual, augmented, and mixed reality. These technologies are now integral to laparoscopic surgery training. Objective: This scoping literature review aimed to analyze the current augmented reality (AR) solutions used in laparoscopic surgery training. Methods: Following the PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews) guidelines, we conducted a scoping review using 4 databases: Scopus, IEEE Xplore, PubMed, and ACM. Inclusion and exclusion criteria were applied to select relevant articles. Exclusion criteria were studies not using AR, not focused on laparoscopic surgery, not focused on training, written in a language other than English, or not providing relevant information on the topics studied. After selecting the articles, research questions (RQs) were formulated to guide the review. In total, 2 independent reviewers then extracted relevant data, and a descriptive analysis of the results was conducted. Results: Of 246 initial records, 172 (69.9\%) remained after removing duplicates. After applying the exclusion criteria, 76 articles were selected, with 25 (33\%) later excluded for not meeting quality standards, leaving 51 (67\%) in the final review. Among the devices analyzed (RQ 1), AR video--based devices were the most prevalent (43/51, 84\%). The most common information provided by AR devices (RQ 1) focused on task execution and patient-related data, both appearing in 20\% (10/51) of studies. Regarding sensorization (RQ 2), most studies (46/51, 90\%) incorporated some form of sensorized environment, with computer vision being the most used technology (21/46, 46\%) and the trainee the most frequently sensorized element (41/51, 80\%). Regarding training setups (RQ 3), 39\% (20/51) of the studies used commercial simulators, and 51\% (26/51) made use of artificial models. Concerning the evaluation methods (RQ 4), objective evaluation was the most used, featured in 71\% (36/51) of the studies. Regarding tasks (RQ 5), 43\% (22/51) of studies focused on full surgical procedures, whereas 57\% (29/51) focused on simple training tasks, with suturing being the most common among the latter (11/29, 38\%). Conclusions: This scoping review highlights the evolving role of AR technologies in laparoscopic surgery training, although the impact of optical see-through devices remains unclear due to their limited use. It underscores the potential of emerging technologies such as haptic feedback, computer vision, and eye tracking to further enhance laparoscopic skill acquisition. While most relevant articles from other databases were included, some studies may have been missed due to the specific databases and search strategies used. 
Moreover, the need for standardized evaluation metrics is emphasized, paving the way for future research into AR's full potential in laparoscopic skill acquisition. ", doi="10.2196/58108", url="https://www.jmir.org/2025/1/e58108" } @Article{info:doi/10.2196/58086, author="Arthur, Tom and Melendez-Torres, GJ and Harris, David and Robinson, Sophie and Wilson, Mark and Vine, Sam", title="Extended Reality Interventions for Health and Procedural Anxiety: Panoramic Meta-Analysis Based on Overviews of Reviews", journal="J Med Internet Res", year="2025", month="Jan", day="8", volume="27", pages="e58086", keywords="virtual reality", keywords="exposure therapy", keywords="distraction techniques", keywords="patient education", keywords="fear", keywords="phobia", keywords="immersive", keywords="anxiety", keywords="meta-analysis", keywords="extended reality", keywords="intervention", keywords="procedural anxiety", keywords="patient anxiety", keywords="exposure", keywords="distraction", abstract="Background: Extended reality (XR) technologies are increasingly being used to reduce health and procedural anxieties. The global effectiveness of these interventions is uncertain, and there is a lack of understanding of how patient outcomes might vary between different contexts and modalities. Objective: This research used panoramic meta-analysis to synthesize evidence across the diverse clinical contexts in which XR is used to address common outcomes of health and procedural anxiety. Methods: Review-level evidence was obtained from 4 databases (MEDLINE, Embase, APA PsycINFO, and Epistemonikos) from the beginning of 2013 until May 30, 2023. Reviews that performed meta-analysis of randomized controlled trials relating to patient-directed XR interventions for health and procedural anxiety were included. Studies that analyzed physiological measures, or focused on technologies that did not include meaningful immersive components, were excluded. Furthermore, data were only included from studies that compared intervention outcomes against no-treatment or treatment-as-usual controls. Analyses followed a preregistered, publicly available protocol. Trial effect sizes were extracted from reviews and expressed as standardized mean differences, which were entered into a 3-level generalized linear model. Here, outcomes were estimated for patients (level 1), studies (level 2), and anxiety indications (level 3), while meta-regressions explored possible influences of age, immersion, and different mechanisms of action. Where relevant, the quality of reviews was appraised using the AMSTAR-2 (A Measurement Tool to Assess Systematic Reviews, Revised Instrument) tool. Results: Data from 83 individual trials were extracted from 18 eligible meta-analyses. Most studies involved pediatric patient groups and focused on procedural, as opposed to general, health anxieties (eg, relating to needle insertion, dental operations, and acute surgery contexts). Interventions targeted distraction-, education-, and exposure-based mechanisms, and were provided via a range of immersive and nonimmersive systems. These interventions proved broadly effective in reducing patient anxiety, with models revealing significant but heterogeneous effects for both procedural (d=--0.75, 95\% CI --0.95 to --0.54) and general health (d=--0.82, 95\% CI --1.20 to --0.45) indications (when compared with nontreatment or usual-care control conditions). 
For procedural anxieties, effects may be influenced by publication bias and appear more pronounced for children (vs adults) and nonimmersive (vs immersive) technology interventions, but they were not different by indication. Conclusions: Results demonstrate that XR interventions have successfully reduced patient anxiety across diverse clinical contexts. However, significant uncertainty remains about the generalizability of effects within various unexplored indications, and existing evidence is limited in methodological quality. Although current research is broadly positive in this area, it is premature to assert that XR interventions are effective for any given health or procedural anxiety indication. ", doi="10.2196/58086", url="https://www.jmir.org/2025/1/e58086" } @Article{info:doi/10.2196/60792, author="Ebnali Harari, Rayan and Altaweel, Abdullah and Anderson, Erik and Pozner, Charles and Grossmann, Rafael and Goldsmith, Andrew and Shokoohi, Hamid", title="Augmented Reality in Enhancing Operating Room Crisis Checklist Adherence: Randomized Comparative Efficacy Study", journal="JMIR XR Spatial Comput", year="2025", month="Jan", day="6", volume="2", pages="e60792", keywords="augmented reality", keywords="operating room", keywords="crisis checklist", keywords="checklist", keywords="guideline adherence", keywords="quality improvement", keywords="patient safety", keywords="cardiac arrest", keywords="hypotension", keywords="hyperthermia", keywords="critical care", keywords="emergency department", abstract="Background: Effective crisis management in operating rooms (ORs) is crucial for patient safety. Despite their benefits, adherence to OR crisis checklists is often limited, highlighting the need for innovative solutions. Objective: The objective of this study was to evaluate the efficacy of augmented reality (AR)-enhanced checklists in improving protocol adherence, compared to traditional paper checklists and no checklist scenarios during simulated OR crises. Methods: This study was a randomized comparative efficacy study comparing the utility of AR checklists, paper checklists, and no checklist scenarios using 4 validated, simulated OR crisis scenarios: asystolic cardiac arrest, air embolism, unexplained hypotension/hypoxia, and malignant hyperthermia. The study took place in a simulated OR setting and had applicability to the standard procedures in ORs, critical care units, and urgent care scenarios in the emergency department. To form the 24 OR teams, 50 professionals including 24 anesthesiologists, 24 nurses, 1 surgeon, and 1 scrub nurse from two academic hospitals were included. The primary outcome measured was the failure to adhere (FTA) rate for critical actions during simulated OR crises. Adherence was determined using retrospective video analysis involving 595 key processes evaluated across 24 surgical teams. Interrater reliability was assessed using a Cohen $\kappa$. Secondary outcomes included checklist usability and cognitive load, as measured by the low-frequency to high-frequency (LF/HF) ratio of the heart rate variability. Results: The AR checklist group showed a significantly lower FTA rate (mean 8.32\%, SD 5.65\%) compared to the paper checklist group (mean 15.1\%, SD 5.77\%; t23=-2.08; P=.048) and the no checklist group (mean 29.81\%, SD 5.59\%; t23=-6.47; P<.001). The AR checklist also resulted in a higher LF/HF ratio for anesthesiologists (F2,46=4.88; P=.02), indicating a potential increase in cognitive load. 
Survey data indicated a positive reception of both AR and paper checklists. Conclusions: These results suggest that AR checklists could offer a viable method for enhancing adherence to critical care protocols. However, further research is needed to fully assess their impact on clinical outcomes and to address any associated increase in cognitive load. ", doi="10.2196/60792", url="https://xr.jmir.org/2025/1/e60792" } @Article{info:doi/10.2196/57030, author="Samson, Laurent and Carcreff, Lena and Noublanche, Fr{\'e}d{\'e}ric and Noublanche, Sophie and Vermersch-Leiber, H{\'e}l{\`e}ne and Annweiler, C{\'e}dric", title="User Experience of a Semi-Immersive Musical Serious Game to Stimulate Cognitive Functions in Hospitalized Older Patients: Questionnaire Study", journal="JMIR Serious Games", year="2025", month="Jan", day="6", volume="13", pages="e57030", keywords="virtual reality", keywords="geriatrics", keywords="reminiscence", keywords="episodic memory", keywords="serious game", keywords="neurocognitive disorders", keywords="older adults", keywords="user experience", abstract="Background: Reminiscence therapy through music is a psychosocial intervention with benefits for older patients with neurocognitive disorders. Therapies using virtual or augmented reality are efficient in ecologically assessing, and eventually training, episodic memory in older populations. We designed a semi-immersive musical game called ``A Life in Songs,'' which invites patients to immerse themselves in a past era through visuals and songs from that time period. The game aspires to become a playful, easy-to-use, and complete tool for the assessment, rehabilitation, and prevention of neurocognitive decline associated with aging. Objective: This study aimed to assess the user experience (UX) associated with the newly designed serious game. Methods: After one or several sessions of the game guided by the therapist, patients of the geriatric wards were asked to answer questions selected from 2 widely known UX scales (AttrakDiff and meCUE [modular evaluation of the components of user experience]) with the therapist's help. The internal consistency of the UX dimensions was assessed through Cronbach $\alpha$ to verify the validity of the dimensions. The level of engagement of the patient throughout the experimental session was also assessed following an internally developed scale, which included 5 levels (interactive, constructive, active, passive, and disengaged behaviors). UX mean scores were computed and presented graphically. Verbal feedback was reported to support the quantitative results. Results: Overall, 60 inpatients with a mean age of 84.2 (SD 5.5) years, the majority of whom were women (41/60, 68\%), were included. Their scores on the Mini-Mental State Examination (MMSE) ranged between 12 and 29. Nearly half of the patients (27/56, 48\%) had no major neurocognitive disorder (MNCD), 22/56 (39\%) had mild MNCD, and 7/56 (13\%) had moderate MNCD. The results revealed very positive UX, with mean values beyond the neutral values for every UX dimension of both scales. The overall mean judgment was rated 3.92 (SD 0.87) on a scale of -5 to 5. Internal consistency was acceptable to good for the emotional dimensions of the meCUE. Questionable to unacceptable consistency was found for the other UX dimensions. Participants were mostly active (23/60, 38\%) and constructive (21/60, 35\%). Conclusions: These findings demonstrated a very good appreciation of the game by geriatric inpatients. 
Participants' and health care professionals' verbal comments strongly aligned with the quantitative results. The poor internal consistency in the UX dimensions reflected the high heterogeneity among the included patients. Further studies are needed to evaluate the potential benefits of clinical factors such as neurocognitive functions, mood, depression, or quality of life. ", doi="10.2196/57030", url="https://games.jmir.org/2025/1/e57030" } @Article{info:doi/10.2196/60374, author="Pearce, Nettleton Louise Michelle and Pryor, Julie and Redhead, Jason and Sherrington, Catherine and Hassett, Leanne", title="Advanced Technology in a Real-World Rehabilitation Setting: Longitudinal Observational Study on Clinician Adoption and Implementation", journal="J Med Internet Res", year="2024", month="Dec", day="30", volume="26", pages="e60374", keywords="rehabilitation", keywords="technology", keywords="digital health", keywords="virtual reality", keywords="robotics", keywords="exoskeleton device", keywords="implementation science", keywords="physiotherapy", keywords="physical therapy", keywords="occupational therapy", keywords="mobile phone", abstract="Background: Advanced technologies are becoming increasingly accessible in rehabilitation. Current research suggests technology can increase therapy dosage, provide multisensory feedback, and reduce manual handling for clinicians. While more high-quality evidence regarding the effectiveness of rehabilitation technologies is needed, understanding of how to effectively integrate technology into clinical practice is also limited. Current implementation of rehabilitation technology is inconsistent, with low uptake among clinicians and frequent reports of technology abandonment. An Australian rehabilitation provider opened a new technology therapy center in 2022, offering a unique opportunity to generate practice-based evidence to inform future technology implementation and research. Objective: This study aimed to investigate the implementation and adoption of advanced technology within a real-world rehabilitation setting. Methods: This study was a longitudinal observational study in a rehabilitation organization with inpatient, outpatient, and community settings. Allied health clinicians (n=119) within the organization had access to advanced technologies, with patients receiving neurological, spinal cord injury, brain injury, or general rehabilitation. Interventions included 21 advanced technologies, including robotic, virtual reality (VR), sensor-based, and functional electrical stimulation devices. Clinicians received training for devices in a staged approach by external and internal trainers. Data were collected from patient electronic medical records from July 1, 2022, to June 30, 2023. Outcomes included frequency of advanced technology use, patient demographics (age, gender, and primary health condition), clinician discipline, rehabilitation service (inpatient, outpatient, or community), goals of technology therapy sessions, and therapy dosage achieved (minutes active, number of repetitions, and meters walked). Results: Clinicians used advanced technology 4208 times with 269 patients over 12 months; specifically, physiotherapists (2716/4208, 65\%), occupational therapists (1396/4208, 33\%), and allied health assistants (96/4208, 2\%). The majority of patients had stroke, spinal cord injury, or brain injury diagnoses (188/269, 70\%). Devices were typically used to target impairment and activity limitation--related goals. 
Frequently used devices included gait training body-weight support (VR treadmill and overground), overground robotic exoskeletons, and upper limb robotic VR devices. Outpatient services were the dominant users of advanced technology (3940/4208, 94\%). Clinicians most commonly used devices for patients with stroke (1973/4208, 47\%) and used the greatest variety of devices for patients with stroke and spinal cord injury. The relative use of lower limb robotic devices was greater in inpatient services (91/178, 51\%, vs outpatient services, 963/2335, 41\%) ($\chi^2_1$=6.6, P=.01) and for patients with spinal cord injury (48/95, 51\%, vs all other conditions, between 24\% and 31\%; $\chi^2_5$=16.8, P=.005). Conclusions: The type and amount of advanced technology use differed between patient populations and rehabilitation settings. To support clinician use of advanced technology, devices should match the rehabilitation context. Tailored strategies, such as clinician training, are important. Further practice-based research is required to provide guidance on implementation and to establish the effectiveness of advanced technology use. ", doi="10.2196/60374", url="https://www.jmir.org/2024/1/e60374" } @Article{info:doi/10.2196/57327, author="Sun, Wan-Na and Hsieh, Min-Chai and Wang, Wei-Fang", title="Nurses' Knowledge and Skills After Use of an Augmented Reality App for Advanced Cardiac Life Support Training: Randomized Controlled Trial", journal="J Med Internet Res", year="2024", month="Dec", day="5", volume="26", pages="e57327", keywords="augmented reality", keywords="technology intervention", keywords="randomized controlled trial", keywords="advanced cardiac life support", keywords="nursing education", abstract="Background: Advanced cardiac life support (ACLS) skills are essential for nurses. During the COVID-19 pandemic, augmented reality (AR) technologies were incorporated into medical education to increase learning motivation and accessibility. Objective: This study aims to determine whether AR for educational applications can significantly improve crash cart learning, learning motivation, cognitive load, and system usability. It focused on a subgroup of nurses with less than 2 years of experience. Methods: This randomized controlled trial was conducted in a medical center in southern Taiwan. An ACLS cart training course was developed using AR technologies in the first stage. Additionally, the efficacy of the developed ACLS training course was evaluated. The AR group used a crash cart learning system developed with AR technology, while the control group received traditional lecture-based instruction. Both groups were evaluated immediately after the course. Performance was assessed through learning outcomes related to overall ACLS and crash cart use. The Instructional Materials Motivation Survey, System Usability Scale, and Cognitive Load Theory Questionnaire were also used to assess secondary outcomes in the AR group. Subgroup analyses were performed for nurses with less than 2 years of experience. Results: All 102 nurses completed the course, with 43 nurses in the AR group and 59 nurses in the control group. The AR group outperformed the control group on overall ACLS outcomes and crash cart learning outcomes (P=.002; P=.01). The improvement rate was largest for new staff, whether measured by the overall learning effect or the crash cart effect. 
Subgroup analysis revealed that nurses with less than 2 years of experience in the AR group showed more significant improvements in both overall learning (P<.001) and crash cart outcomes (P<.001) compared to their counterparts in the control group. For nurses with more than 2 years of experience, no significant differences were found between the AR and control groups in posttraining learning outcomes for the crash cart (P=.32). The AR group demonstrated high scores for motivation (Instructional Materials Motivation Survey mean score 141.65, SD 19.25) and system usability (System Usability Scale mean score 90.47, SD 11.91), as well as a low score for cognitive load (Cognitive Load Theory Questionnaire mean score 15.42, SD 5.76). Conclusions: AR-based learning significantly improves ACLS knowledge and skills, especially for nurses with less experience, compared to traditional methods. The high usability and motivational benefits of AR suggest its potential for broader applications in nursing education. Trial Registration: ClinicalTrials.gov NCT06057285; https://clinicaltrials.gov/ct2/show/NCT06057285 ", doi="10.2196/57327", url="https://www.jmir.org/2024/1/e57327" } @Article{info:doi/10.2196/53157, author="Zhang, Zhan and Bai, Enze and Xu, Yincao and Stepanian, Aram and Kutzin, M. Jared and Adelgais, Kathleen and Ozkaynak, Mustafa", title="A Smart Glass Telemedicine Application for Prehospital Communication: User-Centered Design Study", journal="J Med Internet Res", year="2024", month="Nov", day="29", volume="26", pages="e53157", keywords="smart glass", keywords="telemedicine", keywords="participatory design", keywords="emergency medical service", keywords="health care", keywords="prehospital care", keywords="mobile health", keywords="mHealth", keywords="augmented reality", abstract="Background: Smart glasses have emerged as a promising solution for enhancing communication and care coordination among distributed medical teams. While prior research has explored the feasibility of using smart glasses to improve prehospital communication between emergency medical service (EMS) providers and remote physicians, a research gap remains in understanding the specific requirements and needs of EMS providers for smart glass implementation. Objective: This study aims to iteratively design and evaluate a smart glass application tailored for prehospital communication by actively involving prospective users in the system design process. Methods: Grounded in participatory design, the study consisted of 2 phases of design requirement gathering, rapid prototyping, usability testing, and prototype refinement. In total, 43 distinct EMS providers with diverse backgrounds participated in this 2-year long iterative design process. All qualitative data (eg, transcribed interviews and discussions) were iteratively coded and analyzed by at least 2 researchers using thematic analysis. Quantitative data, such as System Usability Scale (SUS) scores and feature ratings, were analyzed using statistical methods. Results: Our research identified challenges in 2 essential prehospital communication activities: contacting online medical control (OLMC) physicians for medical guidance and notifying receiving hospital teams of incoming patients. 
The iterative design process led to the identification of 5 key features that could potentially address the identified challenges: video call functionality with OLMC physicians, call priority indication for expedited OLMC contact, direct communication with receiving hospitals, multimedia patient information sharing, and touchless interaction methods for operating the smart glasses. The SUS score for our system design improved from a mean of 74.3 (SD 11.3) in the first phase (classified as good usability) to 80.3 (SD 13.1) in the second phase (classified as excellent usability). This improvement, along with consistently high ratings for other aspects (eg, willingness to use and feature design), demonstrated continuous enhancement of the system's design across the 2 phases. Additionally, differences in SUS scores were observed between EMS providers in urban areas (median 85, IQR 76-94) and rural areas (median 72.5, IQR 66-83; Mann-Whitney U=43; P=.17), as well as between paramedics (median 72.5, IQR 70-80) and emergency medical technicians (median 85, IQR 74-98; Mann-Whitney U=44.5; P=.13), suggesting that EMS providers in urban settings and those with less training in treating patients in critical conditions perceived the smart glass application as more useful and user-friendly, although these differences did not reach statistical significance. Finally, the study also identified several concerns regarding the adoption of the smart glass application, including technical limitations, environmental constraints, and potential barriers to workflow integration. Conclusions: Using a participatory design approach, this study provided insights into designing user-friendly smart glasses that address the current challenges EMS providers face in dynamic prehospital settings. ", doi="10.2196/53157", url="https://www.jmir.org/2024/1/e53157" } @Article{info:doi/10.2196/56790, author="Vlake, H. Johan and Drop, Q. Denzel L. and Van Bommel, Jasper and Riva, Giuseppe and Wiederhold, K. Brenda and Cipresso, Pietro and Rizzo, S. Albert and Rothbaum, O. Barbara and Botella, Cristina and Hooft, Lotty and Bienvenu, J. Oscar and Jung, Christian and Geerts, Bart and Wils, Evert-Jan and Gommers, Diederik and van Genderen, E. Michel and ", title="Reporting Guidelines for the Early-Phase Clinical Evaluation of Applications Using Extended Reality: RATE-XR Qualitative Study Guideline", journal="J Med Internet Res", year="2024", month="Nov", day="29", volume="26", pages="e56790", keywords="extended reality", keywords="XR", keywords="virtual reality", keywords="augmented reality", keywords="mixed reality", keywords="reporting guideline", keywords="Delphi process", keywords="consensus", keywords="computer-generated simulation", keywords="simulation", keywords="virtual world", keywords="simulation experience", keywords="clinical evaluation", abstract="Background: Extended reality (XR), encompassing technologies such as virtual reality, augmented reality, and mixed reality, has rapidly gained prominence in health care. However, existing XR research often lacks rigor, proper controls, and standardization. Objective: To address this and to enhance the transparency and quality of reporting in early-phase clinical evaluations of XR applications, we present the ``Reporting for the early-phase clinical evaluation of applications using extended reality'' (RATE-XR) guideline. Methods: We conducted a 2-round modified Delphi process involving experts from diverse stakeholder categories, and the RATE-XR is therefore the result of a consensus-based, multistakeholder effort. 
Results: The guideline comprises 17 XR-specific (composed of 18 subitems) and 14 generic reporting items, each with a complementary Explanation \& Elaboration section. Conclusions: The items encompass critical aspects of XR research, from clinical utility and safety to human factors and ethics. By offering a comprehensive checklist for reporting, the RATE-XR guideline facilitates robust assessment and replication of early-stage clinical XR studies. It underscores the need for transparency, patient-centeredness, and balanced evaluation of the applications of XR in health care. By providing an actionable checklist of minimal reporting items, this guideline will facilitate the responsible development and integration of XR technologies into health care and related fields. ", doi="10.2196/56790", url="https://www.jmir.org/2024/1/e56790" } @Article{info:doi/10.2196/59469, author="Stankoski, Simon and Kiprijanovska, Ivana and Gjoreski, Martin and Panchevski, Filip and Sazdov, Borjan and Sofronievski, Bojan and Cleal, Andrew and Fatoorechi, Mohsen and Nduka, Charles and Gjoreski, Hristijan", title="Controlled and Real-Life Investigation of Optical Tracking Sensors in Smart Glasses for Monitoring Eating Behavior Using Deep Learning: Cross-Sectional Study", journal="JMIR Mhealth Uhealth", year="2024", month="Sep", day="26", volume="12", pages="e59469", keywords="chewing detection", keywords="eating detection", keywords="smart glasses", keywords="automatic dietary monitoring", keywords="eating behavior", abstract="Background: The increasing prevalence of obesity necessitates innovative approaches to better understand this health crisis, particularly given its strong connection to chronic diseases such as diabetes, cancer, and cardiovascular conditions. Monitoring dietary behavior is crucial for designing effective interventions that help decrease obesity prevalence and promote healthy lifestyles. However, traditional dietary tracking methods are limited by participant burden and recall bias. Exploring microlevel eating activities, such as meal duration and chewing frequency, in addition to eating episodes, is crucial due to their substantial relation to obesity and disease risk. Objective: The primary objective of the study was to develop an accurate and noninvasive system for automatically monitoring eating and chewing activities using sensor-equipped smart glasses. The system distinguishes chewing from other facial activities, such as speaking and teeth clenching. The secondary objective was to evaluate the system's performance on unseen test users using a combination of laboratory-controlled and real-life user studies. Unlike state-of-the-art studies that focus on detecting full eating episodes, our approach provides a more granular analysis by specifically detecting chewing segments within each eating episode. Methods: The study uses OCO optical sensors embedded in smart glasses to monitor facial muscle activations related to eating and chewing activities. The sensors measure relative movements on the skin's surface in 2 dimensions (X and Y). Data from these sensors are analyzed using deep learning (DL) to distinguish chewing from other facial activities. To address the temporal dependence between chewing events in real life, we integrate a hidden Markov model as an additional component that analyzes the output from the DL model. 
Results: Statistical tests of mean sensor activations revealed statistically significant differences across all 6 comparison pairs (P<.001) involving 2 sensors (cheeks and temple) and 3 facial activities (eating, clenching, and speaking). These results demonstrate the sensitivity of the sensor data. Furthermore, the convolutional long short-term memory model, which is a combination of convolutional and long short-term memory neural networks, emerged as the best-performing DL model for chewing detection. In controlled laboratory settings, the model achieved an F1-score of 0.91, demonstrating robust performance. In real-life scenarios, the system demonstrated high precision (0.95) and recall (0.82) for detecting eating segments. The chewing rates and the number of chews evaluated in the real-life study showed consistency with expected real-life eating behaviors. Conclusions: The study represents a substantial advancement in dietary monitoring and health technology. By providing a reliable and noninvasive method for tracking eating behavior, it has the potential to revolutionize how dietary data are collected and used. This could lead to more effective health interventions and a better understanding of the factors influencing eating habits and their health implications. ", doi="10.2196/59469", url="https://mhealth.jmir.org/2024/1/e59469", url="http://www.ncbi.nlm.nih.gov/pubmed/39325528" } @Article{info:doi/10.2196/50066, author="Berglund, Aseel and Klompstra, Leonie and Or{\"a}dd, Helena and Fallstr{\"o}m, Johan and Str{\"o}mberg, Anna and Jaarsma, Tiny and Berglund, Erik", title="The Rationale Behind the Design Decisions in an Augmented Reality Mobile eHealth Exergame to Increase Physical Activity for Inactive Older People With Heart Failure", journal="JMIR Serious Games", year="2024", month="Aug", day="21", volume="12", pages="e50066", keywords="sedentary", keywords="exercise", keywords="exertion", keywords="exergames", keywords="technology", keywords="training", keywords="inactivity", keywords="eHealth application", keywords="heart disease", keywords="physical activity", doi="10.2196/50066", url="https://games.jmir.org/2024/1/e50066" } @Article{info:doi/10.2196/56916, author="Albright, Liam and Ko, Woojin and Buvanesh, Meyhaa and Haraldsson, Harald and Polubriaginof, Fernanda and Kuperman, J. Gilad and Levy, Michelle and Sterling, R. Madeline and Dell, Nicola and Estrin, Deborah", title="Opportunities and Challenges for Augmented Reality in Family Caregiving: Qualitative Video Elicitation Study", journal="JMIR Form Res", year="2024", month="May", day="30", volume="8", pages="e56916", keywords="augmented reality", keywords="extended reality", keywords="family caregiver", keywords="home care", keywords="virtual care", keywords="telemedicine", keywords="telehealth", keywords="oncology", keywords="artificial intelligence", keywords="mobile phone", abstract="Background: Although family caregivers play a critical role in care delivery, research has shown that they face significant physical, emotional, and informational challenges. One promising avenue to address some of caregivers' unmet needs is via the design of digital technologies that support caregivers' complex portfolio of responsibilities. Augmented reality (AR) applications, specifically, offer new affordances to aid caregivers as they perform care tasks in the home. Objective: This study explored how AR might assist family caregivers with the delivery of home-based cancer care. 
The specific objectives were to shed light on challenges caregivers face where AR might help, investigate opportunities for AR to support caregivers, and understand the risks of AR exacerbating caregiver burdens. Methods: We conducted a qualitative video elicitation study with clinicians and caregivers. We created 3 video elicitations that offer ways in which AR might support caregivers as they perform often high-stakes, unfamiliar, and anxiety-inducing tasks in postsurgical cancer care: wound care, drain care, and rehabilitative exercise. The elicitations show functional AR applications built using Unity Technologies software and Microsoft Hololens2. Using elicitations enabled us to avoid rediscovering known usability issues with current AR technologies, allowing us to focus on high-level, substantive feedback on potential future roles for AR in caregiving. Moreover, it enabled nonintrusive exploration of the inherently sensitive in-home cancer care context. Results: We recruited 22 participants for our study: 15 clinicians (eg, oncologists and nurses) and 7 family caregivers. Our findings shed light on clinicians' and caregivers' perceptions of current information and communication challenges caregivers face as they perform important physical care tasks as part of cancer treatment plans. Most significant was the need to provide better and ongoing support for execution of caregiving tasks in situ, when and where the tasks need to be performed. Such support needs to be tailored to the specific needs of the patient, to the stress-impaired capacities of the caregiver, and to the time-constrained communication availability of clinicians. We uncover opportunities for AR technologies to potentially increase caregiver confidence and reduce anxiety by supporting the capture and review of images and videos and by improving communication with clinicians. However, our findings also suggest ways in which, if not deployed carefully, AR technologies might exacerbate caregivers' already significant burdens. Conclusions: These findings can inform both the design of future AR devices, software, and applications and the design of caregiver support interventions based on already available technology and processes. Our study suggests that AR technologies and the affordances they provide (eg, tailored support, enhanced monitoring and task accuracy, and improved communications) should be considered as a part of an integrated care journey involving multiple stakeholders, changing information needs, and different communication channels that blend in-person and internet-based synchronous and asynchronous care, illness, and recovery. 
", doi="10.2196/56916", url="https://formative.jmir.org/2024/1/e56916", url="http://www.ncbi.nlm.nih.gov/pubmed/38814705" } @Article{info:doi/10.2196/54188, author="Yoo, Suyoung and Heo, Sejin and Song, Soojin and Park, Aeyoung and Cho, Hyunchung and Kim, Yuna and Cha, Chul Won and Kim, Kyeongsug and Son, Hi Meong", title="Adoption of Augmented Reality in Educational Programs for Nurses in Intensive Care Units of Tertiary Academic Hospitals: Mixed Methods Study", journal="JMIR Serious Games", year="2024", month="May", day="23", volume="12", pages="e54188", keywords="augmented reality", keywords="AR", keywords="clinical skills education", keywords="nurse education", keywords="technology-based education", keywords="education", keywords="nurse", keywords="nursing", keywords="allied health", keywords="technology-enhanced learning", keywords="interview", keywords="training", keywords="usability", keywords="acceptability", keywords="educational", keywords="teaching", keywords="ICU", keywords="intensive care unit", keywords="self-guided", keywords="self-directed", keywords="hands-on", keywords="adoption", keywords="TAM", keywords="Technology Acceptance Model", keywords="skill", keywords="acquisition", abstract="Background: In the wake of challenges brought by the COVID-19 pandemic to conventional medical education, the demand for innovative teaching methods has surged. Nurse training, with its focus on hands-on practice and self-directed learning, encountered significant hurdles with conventional approaches. Augmented reality (AR) offers a potential solution to addressing this issue. Objective: The aim of this study was to develop, introduce, and evaluate an AR-based educational program designed for nurses, focusing on its potential to facilitate hands-on practice and self-directed learning. Methods: An AR-based educational program for nursing was developed anchored by the Kern six-step framework. First, we identified challenges in conventional teaching methods through interviews and literature reviews. Interviews highlighted the need for hands-on practice and on-site self-directed learning with feedback from a remote site. The training goals of the platform were established by expert trainers and researchers, focusing on the utilization of a ventilator and extracorporeal membrane oxygenation system. Intensive care nurses were enrolled to evaluate AR education. We then assessed usability and acceptability of the AR training using the System Usability Scale and Technology Acceptance Model with intensive care nurses who agreed to test the new platform. Additionally, selected participants provided deeper insights through semistructured interviews. Results: This study highlights feasibility and key considerations for implementing an AR-based educational program for intensive care unit nurses, focusing on training objectives of the platform. Implemented over 2 months using Microsoft Dynamics 365 Guides and HoloLens 2, 28 participants were trained. Feedback gathered through interviews with the trainers and trainees indicated a positive reception. In particular, the trainees mentioned finding AR particularly useful for hands-on learning, appreciating its realism and the ability for repetitive practice. However, some challenges such as difficulty in adapting to the new technology were expressed. Overall, AR exhibits potential as a supplementary tool in nurse education. 
Conclusions: To our knowledge, this is the first study to substitute conventional methods with AR in this specific area of critical care nursing. These results indicate the multiple principal factors to take into consideration when adopting AR education in hospitals. AR is effective in promoting self-directed learning and hands-on practice, with participants displaying active engagement and enhanced skill acquisition. Trial Registration: ClinicalTrials.gov NCT05629663; https://clinicaltrials.gov/study/NCT05629663. ", doi="10.2196/54188", url="https://games.jmir.org/2024/1/e54188", url="http://www.ncbi.nlm.nih.gov/pubmed/38780998" } @Article{info:doi/10.2196/55569, author="Zhao, Licong and Agazzi, Heather and Du, Yasong and Meng, Hongdao and Maku, Renya and Li, Ke and Aspinall, Peter and Garvan, Wilson Cynthia and Fang, Shuanfeng", title="A Digital Cognitive-Physical Intervention for Attention-Deficit/Hyperactivity Disorder: Randomized Controlled Trial", journal="J Med Internet Res", year="2024", month="May", day="10", volume="26", pages="e55569", keywords="school-age children", keywords="cognitive training", keywords="exercise therapy", keywords="gamification", keywords="ADHD", keywords="attention deficit", keywords="attention-deficit/hyperactivity disorder", keywords="RCT", keywords="randomized controlled trial", keywords="executive function", keywords="digital intervention", keywords="AR", keywords="augmented reality", abstract="Background: Attention-deficit/hyperactivity disorder (ADHD) is one of the most common neurodevelopmental disorders among children. Pharmacotherapy has been the primary treatment for ADHD, supplemented by behavioral interventions. Digital and exercise interventions are promising nonpharmacologic approaches for enhancing the physical and psychological health of children with ADHD. However, the combined impact of digital and exercise therapies remains unclear. Objective: The aim of this study was to determine whether BrainFit, a novel digital intervention combining gamified cognitive and exercise training, is efficacious in reducing ADHD symptoms and executive function (EF) among school-aged children with ADHD. Methods: This 4-week prospective randomized controlled trial included 90 children (6-12 years old) who visited the ADHD outpatient clinic and met the diagnostic criteria for ADHD. The participants were randomized (1:1) to the BrainFit intervention (n=44) or a waitlist control (n=46) between March and August 2022. The intervention consisted of 12 30-minute sessions delivered on an iPad over 4 weeks with 3 sessions per week (Monday, Wednesday, and Friday after school) under the supervision of trained staff. The primary outcomes were parent-rated symptoms of attention and hyperactivity assessed according to the Swanson, Nolan, and Pelham questionnaire (SNAP-IV) rating scale and EF skills assessed by the Behavior Rating Inventory of Executive Function (BRIEF) scale, evaluated pre and post intervention. Intention-to-treat analysis was performed on 80 children after attrition. A nonparametric resampling-based permutation test was used for hypothesis testing of intervention effects. Results: Among the 145 children who met the inclusion criteria, 90 consented and were randomized; ultimately, 80 (88.9\%) children completed the study and were included in the analysis. The participants' average age was 8.4 (SD 1.3) years, including 63 (78.8\%) male participants. 
The most common ADHD subtype was hyperactive/impulsive (54/80, 68\%) and 23 (29\%) children had severe symptoms. At the endpoint of the study, the BrainFit intervention group had a significantly larger improvement in total ADHD symptoms (SNAP-IV total score) as compared to those in the control group ($\beta$=--12.203, 95\% CI --17.882 to --6.523; P<.001), owing to lower scores on the subscales Inattention ($\beta$=--3.966, 95\% CI --6.285 to --1.647; P<.001), Hyperactivity/Impulsivity ($\beta$=--5.735, 95\% CI --8.334 to --3.137; P<.001), and Oppositional Defiant Disorder ($\beta$=--2.995, 95\% CI --4.857 to --1.132; P=.002). The intervention was associated with significant reduction in the Metacognition Index ($\beta$=--6.312, 95\% CI --10.973 to --1.650; P=.006) and Global Executive Composite ($\beta$=--5.952, 95\% CI --10.214 to --1.690; P=.003) on the BRIEF. No severe intervention-related adverse events were reported. Conclusions: This novel digital cognitive-physical intervention was efficacious in school-age children with ADHD. A larger multicenter effectiveness trial with longer follow-up is warranted to confirm these findings and to assess the durability of treatment effects. Trial Registration: Chinese Clinical Trial Register ChiCTR2300070521; https://www.chictr.org.cn/showproj.html?proj=177806 ", doi="10.2196/55569", url="https://www.jmir.org/2024/1/e55569", url="http://www.ncbi.nlm.nih.gov/pubmed/38728075" } @Article{info:doi/10.2196/55156, author="Willinger, Laura and B{\"o}hm, Birgit and Schweizer, Florian and Reimer, Marie Lara and Jonas, Stephan and Scheller, A. Daniel and Oberhoffer-Fritz, Renate and M{\"u}ller, Jan", title="KIJANI App to Promote Physical Activity in Children and Adolescents: Protocol for a Mixed Method Evaluation", journal="JMIR Res Protoc", year="2024", month="May", day="3", volume="13", pages="e55156", keywords="physical activity", keywords="health promotion", keywords="digital health", keywords="gamification", keywords="childhood", keywords="adolescence", keywords="adolescents", keywords="adolescent", keywords="children", keywords="augmented reality", keywords="KIJANI intervention", keywords="KIJANI", keywords="intervention", keywords="user experience", abstract="Background: The prevalence of physical inactivity among children and adolescents is alarmingly high despite the well-documented and comprehensive benefits of regular physical activity (PA). Therefore, PA promotion should start early in childhood and adolescence. Although reducing recreational screen time in children and adolescents is an urgent concern, digital approaches have the potential to make activity promotion attractive and age appropriate for the target group. KIJANI is a mobile app approach to promote PA in children and adolescents via gamification and augmented reality. Objective: This study protocol aims to describe the KIJANI intervention in detail, as well as the evaluation approach. Methods: KIJANI is based on the concept that virtual coins can be earned through PA, for example, in the form of a collected step count. With these coins, in turn, blocks can be bought, which can be used to create virtual buildings and integrate them into the player's real-world environment via augmented reality. PA of users is detected via accelerometers integrated into the smartphones. KIJANI can be played at predefined play locations that were comprehensively identified as safe, child-friendly, and attractive for PA by the target group in a partner project. 
The evaluation process will be divided into 2 different stages. The phase-I evaluation will be a mixed methods approach with one-on-one semistructured interviews and questionnaires to evaluate the user experience and receive feedback from the target group. After the implementation of results and feedback from the target group, the phase-II evaluation will proceed in the form of a 2-arm randomized controlled trial, in which the effectiveness of KIJANI will be assessed via objectively measured PA as well as questionnaires. Results: The study received ethical approval from the ethical board of the Technical University of Munich. Participants for the phase-I evaluation are currently being recruited. Conclusions: The study will help to determine the efficacy, applicability, and user experience of a gamified activity promotion application in children and adolescents. Overall, digital health approaches provide easy and wide reachability at low cost and are age appropriate and attractive for the target group of adolescents. Strategies have to be developed to apply digital health approaches in the best possible way for activity promotion. International Registered Report Identifier (IRRID): DERR1-10.2196/55156 ", doi="10.2196/55156", url="https://www.researchprotocols.org/2024/1/e55156", url="http://www.ncbi.nlm.nih.gov/pubmed/38700911" } @Article{info:doi/10.2196/56056, author="Reategui-Rivera, Mahony C. and Villarreal-Zegarra, David and De La Cruz-Torralva, Kelly and D{\'i}az-S{\'a}nchez, Paquita and Finkelstein, Joseph", title="Immersive Technologies for Depression Care: Scoping Review", journal="JMIR Ment Health", year="2024", month="Apr", day="25", volume="11", pages="e56056", keywords="depression", keywords="immersive technologies", keywords="virtual reality", keywords="augmented reality", keywords="mobile phone", abstract="Background: Depression significantly impacts quality of life, affecting approximately 280 million people worldwide. However, only 16.5\% of those affected receive treatment, indicating a substantial treatment gap. Immersive technologies (IMTs) such as virtual reality (VR) and augmented reality offer new avenues for treating depression by creating immersive environments for therapeutic interventions. Despite their potential, significant gaps exist in the current evidence regarding the design, implementation, and use of IMTs for depression care. Objective: We aim to map the available evidence on IMT interventions targeting depression treatment. Methods: This scoping review followed a methodological framework, and we systematically searched databases for studies on IMTs and depression. The focus was on randomized clinical trials involving adults and using IMTs. The selection and charting process involved multiple reviewers to minimize bias. Results: The search identified 16 peer-reviewed articles, predominantly from Europe (n=10, 63\%), with a notable emphasis on Poland (n=9, 56\%), which contributed to more than half of the articles. Most of the studies (9/16, 56\%) were conducted between 2020 and 2021. Regarding participant demographics, of the 16 articles, 5 (31\%) exclusively involved female participants, and 7 (44\%) featured participants whose mean or median age was >60 years. Regarding technical aspects, all studies focused on VR, with most using stand-alone VR headsets (14/16, 88\%), and interventions typically ranging from 2 to 8 weeks, predominantly in hospital settings (11/16, 69\%). 
Only 2 (13\%) of the 16 studies mentioned using a specific VR design framework in planning their interventions. The most frequently used therapeutic approach was Ericksonian psychotherapy, used in 56\% (9/16) of the studies. Notably, none of the articles reported using an implementation framework or identified barriers and enablers to implementation. Conclusions: This scoping review highlights the growing interest in using IMTs, particularly VR, for depression treatment but emphasizes the need for more inclusive and comprehensive research. Future studies should explore varied therapeutic approaches and cost-effectiveness as well as the inclusion of augmented reality to fully realize the potential of IMTs in mental health care. ", doi="10.2196/56056", url="https://mental.jmir.org/2024/1/e56056", url="http://www.ncbi.nlm.nih.gov/pubmed/38663004" } @Article{info:doi/10.2196/53212, author="Min, Jennifer Hyojin and Andler, Caroline and Barber, Banca Rebecca Ortiz La and Chang, P. Todd and Abelairas-Gomez, Cristian and Knowlin, T. Laquanda and Liu, R. Deborah and Fija{\v c}ko, Nino", title="Virtual Reality for Basic Life Support Training in High School Students: Thematic Analysis of Focus Group Interviews", journal="JMIR XR Spatial Comput", year="2024", month="Apr", day="16", volume="1", pages="e53212", keywords="virtual reality", keywords="mixed reality", keywords="technology", keywords="basic life support", keywords="cardiovascular pulmonary resuscitation", keywords="near-peer mentoring", keywords="education", keywords="high school students", abstract="Background: High-quality and engaging cardiopulmonary resuscitation (CPR) training of both health care professionals and members of the public is necessary to provide timely and effective CPR to maximize survival and minimize injuries. Virtual reality (VR) is a novel method to enhance CPR engagement and training. However, a near-peer mentoring framework has not been applied in such training to date. Objective: The purpose of this pilot qualitative study was to understand the acceptability and feasibility of using VR technology to introduce basic life support (BLS) to high school students reinforced by near-peer coaching. Methods: Dyads of high school students underwent BLS training in CPR using a VR experience reinforced by the near-peer mentoring model. Focus group interviews were performed following the intervention. The interview sessions were recorded, transcribed verbatim, and subjected to thematic analysis. VR software data were analyzed after five cycles of chest compressions between the two participants. Results: The overwhelming responses from the three dyads of high school students indicated positive acceptance of learning CPR using VR. Analysis of emerging themes revealed three main categories of barriers and facilitators: (1) motivation to learn CPR, (2) CPR learning modality, and (3) coaching CPR content. These themes supported the theoretical framework of an ``intention-focused'' paradigm leading to acquiring the skills needed to perform CPR and ultimately increasing the chances of a bystander performing CPR. Conclusions: This study highlights the potential for training a unique population to increase bystander effects using novel VR technology coupled with a near-peer mentoring method. Further research is warranted to measure the outcome of the knowledge attained and the intention to perform CPR by high school students who participate in CPR education using VR and a near-peer mentoring method. 
", doi="10.2196/53212", url="https://xr.jmir.org/2024/1/e53212" } @Article{info:doi/10.2196/34629, author="Sharrad, Kelsey and Martini, Caitlin and Tai, Andrew and Spurrier, Nicola and Smith, Ross and Esterman, Adrian and Gwilt, Ian and Sandford, Debra and Carson-Chahhoud, Kristin", title="Mixed Reality Technology to Deliver Psychological Interventions to Adolescents With Asthma: Qualitative Study Using the Theoretical Framework of Acceptability", journal="JMIR Hum Factors", year="2023", month="Jul", day="26", volume="10", pages="e34629", keywords="asthma", keywords="augmented reality", keywords="virtual reality", keywords="mixed reality", keywords="psychological distress", keywords="adolescent", keywords="cognitive behavioral therapies", keywords="mental health", abstract="Background: Interactive, mixed reality technologies such as augmented reality, virtual reality, and holographic technology may provide a novel solution to fast-track the translation of evidence into practice. They may also help overcome barriers to both mental health and asthma management service uptake, such as cost, availability of appointments, fear of judgment, and quality of care. Objective: This study aimed to investigate if mixed reality technology is an acceptable mechanism for the delivery of a component of cognitive and behavioral therapies for the management of elevated psychological distress among young people with asthma. Methods: To explore the perceived acceptability of these technologies, mixed reality tools were evaluated via qualitative, 1-on-1 interviews with young people with asthma and symptoms of psychological distress, parents/caregivers of young people with asthma and symptoms of psychological distress, and relevant health professionals. The Theoretical Framework of Acceptability was used for the deductive coding of the recorded interview transcripts. Results: This study enrolled the following participants: (1) 3 adolescents with asthma and symptoms of psychological distress with a mean age of 14 (SD 1.7) years; (2) 4 parents/caregivers of adolescents with asthma with a mean age of 55 (SD 14.6) years; and (3) 6 health professionals with a mean age of 40.8 (SD 4.3) years. A total of 4 constructs---experienced affective attitude, experienced effectiveness, self-efficacy, and intervention coherence---were coded in all participant transcripts. The most frequently coded constructs were experienced affective attitude and intervention coherence, which were reported a total of 96 times. The least frequently coded construct was anticipated opportunity cost, which was reported a total of 5 times. Participants were mostly positive about the mixed reality resources. However, some concerns were raised regarding ethicality, particularly regarding privacy, accessibility, and messaging. Participants noted the need for technology to be used in conjunction with face-to-face engagement with health professionals and that some patients would respond to this type of delivery mechanism better than others. Conclusions: These results suggest that mixed reality technology to deliver psychological interventions may be an acceptable addition to current health care practices for young people with asthma and symptoms of psychological distress. 
Trial Registration: Australia and New Zealand Clinical Trials Registry ACTRN12620001109998; https://anzctr.org.au/Trial/Registration/TrialReview.aspx?id=380427 ", doi="10.2196/34629", url="https://humanfactors.jmir.org/2023/1/e34629", url="http://www.ncbi.nlm.nih.gov/pubmed/37494096" } @Article{info:doi/10.2196/41220, author="Hellec, Justine and Hayotte, Meggy and Chorin, Fr{\'e}d{\'e}ric and Colson, S. Serge and d'Arripe-Longueville, Fabienne", title="Applying the UTAUT2 Model to Smart Eyeglasses to Detect and Prevent Falls Among Older Adults and Examination of Associations With Fall-Related Functional Physical Capacities: Survey Study", journal="J Med Internet Res", year="2023", month="May", day="12", volume="25", pages="e41220", keywords="Unified Theory of Acceptance and Use of Technology 2", keywords="fall prevention", keywords="fall detection", keywords="older people", keywords="older adults", keywords="facilitating conditions", keywords="effort expectancy", keywords="smart eyeglasses", abstract="Background: As people age, their physical capacities (eg, walking and balance) decline and the risk of falling rises. Yet, classic fall detection devices are poorly accepted by older adults. Because they often wear eyeglasses as they go about their daily activities, daily monitoring to detect and prevent falls with smart eyeglasses might be more easily accepted. Objective: On the basis of the Unified Theory of Acceptance and Use of Technology 2 (UTAUT2), this study evaluated (1) the acceptability of smart eyeglasses for the detection and prevention of falls by older adults and (2) the associations with selected fall-related functional physical capacities. Methods: A total of 142 volunteer older adults (mean age 74.9 years, SD 6.5 years) completed the UTAUT2 questionnaire adapted for smart eyeglasses and then performed several physical tests: a unipodal balance test with eyes open and closed, a 10-m walk test, and a 6-minute walk test. An unsupervised analysis classified the participants into physical performance groups. Multivariate ANOVAs were performed to identify differences in acceptability constructs according to the performance group. Results: The UTAUT2 questionnaire adapted for eyeglasses presented good psychometric properties. Performance expectancy ($\beta$=.21, P=.005), social influence ($\beta$=.18, P=.007), facilitating conditions ($\beta$=.17, P=.04), and habit ($\beta$=.40, P<.001) were significant contributors to the behavioral intention to use smart eyeglasses (R{\texttwosuperior}=0.73). The unsupervised analysis based on fall-related functional physical capacities created 3 groups of physical performance: low, intermediate, and high. Effort expectancy in the low performance group (mean 3.99, SD 1.46) was lower than that in the other 2 groups (ie, intermediate: mean 4.68, SD 1.23; high: mean 5.09, SD 1.41). Facilitating conditions in the high performance group (mean 5.39, SD 1.39) were higher than those in the other 2 groups (ie, low: mean 4.31, SD 1.68; intermediate: mean 4.66, SD 1.51). Conclusions: To our knowledge, this study is the first to examine the acceptability of smart eyeglasses in the context of fall detection and prevention in older adults and to associate acceptability with fall-related functional physical capacities. The older adults with higher physical performances, and possibly lower risks of falling, reported greater acceptability of smart eyeglasses for fall prevention and detection than their counterparts exhibiting low physical performances. 
", doi="10.2196/41220", url="https://www.jmir.org/2023/1/e41220", url="http://www.ncbi.nlm.nih.gov/pubmed/37171835" } @Article{info:doi/10.2196/43240, author="Lundin, M. Robert and Yeap, Yuhern and Menkes, B. David", title="Adverse Effects of Virtual and Augmented Reality Interventions in Psychiatry: Systematic Review", journal="JMIR Ment Health", year="2023", month="May", day="5", volume="10", pages="e43240", keywords="virtual reality", keywords="augmented reality", keywords="mental health", keywords="side effects", keywords="adverse events", keywords="hardware", keywords="VR", keywords="software", keywords="AR", keywords="cybersickness", keywords="reporting standards", abstract="Background: Virtual reality (VR) and augmented reality (AR) are emerging treatment modalities in psychiatry, which are capable of producing clinical outcomes broadly comparable to those achieved with standard psychotherapies. Objective: Because the side effect profile associated with the clinical use of VR and AR remains largely unknown, we systematically reviewed available evidence of their adverse effects. Methods: A systematic review was conducted in accordance with the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) framework across 3 mental health databases (PubMed, PsycINFO, and Embase) to identify VR and AR interventions targeting mental health diagnoses. Results: Of 73 studies meeting the inclusion criteria, 7 reported worsening clinical symptoms or an increased fall risk. Another 21 studies reported ``no adverse effects'' but failed to identify obvious adverse effects, mainly cybersickness, documented in their results. More concerningly, 45 of the 73 studies made no mention of adverse effects whatsoever. Conclusions: An appropriate screening tool would help ensure that VR adverse effects are correctly identified and reported. ", doi="10.2196/43240", url="https://mental.jmir.org/2023/1/e43240", url="http://www.ncbi.nlm.nih.gov/pubmed/37145841" } @Article{info:doi/10.2196/45464, author="Dinh, Alana and Yin, Lukas Andrew and Estrin, Deborah and Greenwald, Peter and Fortenko, Alexander", title="Augmented Reality in Real-time Telemedicine and Telementoring: Scoping Review", journal="JMIR Mhealth Uhealth", year="2023", month="Apr", day="18", volume="11", pages="e45464", keywords="augmented reality", keywords="telemedicine", keywords="telehealth", keywords="telementoring", keywords="teleguidance", keywords="telecommunication", keywords="teleconsultation", keywords="telecollaboration", keywords="scoping review", keywords="mobile phone", abstract="Background: Over the last decade, augmented reality (AR) has emerged in health care as a tool for visualizing data and enhancing simulation learning. AR, which has largely been explored for communication and collaboration in nonhealth contexts, could play a role in shaping future remote medical services and training. This review summarized existing studies implementing AR in real-time telemedicine and telementoring to create a foundation for health care providers and technology developers to understand future opportunities in remote care and education. Objective: This review described devices and platforms that use AR for real-time telemedicine and telementoring, the tasks for which AR was implemented, and the ways in which these implementations were evaluated to identify gaps in research that provide opportunities for further study. 
Methods: We searched PubMed, Scopus, Embase, and MEDLINE to identify English-language studies published between January 1, 2012, and October 18, 2022, implementing AR technology in a real-time interaction related to telemedicine or telementoring. The search terms were ``augmented reality'' OR ``AR'' AND ``remote'' OR ``telemedicine'' OR ``telehealth'' OR ``telementoring.'' Systematic reviews, meta-analyses, and discussion-based articles were excluded from analysis. Results: A total of 39 articles met the inclusion criteria and were categorized into themes of patient evaluation, medical intervention, and education. In total, 20 devices and platforms using AR were identified, with common features being the ability for remote users to annotate, display graphics, and display their hands or tools in the local user's view. Common themes across the studies included consultation and procedural education, with surgery, emergency, and hospital medicine being the most represented specialties. Outcomes were most often measured using feedback surveys and interviews. The most common objective measures were time to task completion and performance. Long-term outcome and resource cost measurements were rare. Across the studies, user feedback was consistently positive for perceived efficacy, feasibility, and acceptability. Comparative trials demonstrated that AR-assisted conditions had noninferior reliability and performance and did not consistently extend procedure times compared with in-person controls. Conclusions: Studies implementing AR in telemedicine and telementoring demonstrated the technology's ability to enhance access to information and facilitate guidance in multiple health care settings. However, AR's role as an alternative to current telecommunication platforms or even in-person interactions remains to be validated, with many disciplines and provider-to-nonprovider uses still lacking robust investigation. Additional studies comparing existing methods may offer more insight into this intersection, but the early stage of technical development and the lack of standardized tools and adoption have hindered the conduct of larger longitudinal and randomized controlled trials. Overall, AR has the potential to complement and advance the capabilities of remote medical care and learning, creating unique opportunities for innovator, provider, and patient involvement. ", doi="10.2196/45464", url="https://mhealth.jmir.org/2023/1/e45464", url="http://www.ncbi.nlm.nih.gov/pubmed/37071458" } @Article{info:doi/10.2196/42709, author="Worlikar, Hemendra and Coleman, Sean and Kelly, Jack and O'Connor, Sadhbh and Murray, Aoife and McVeigh, Terri and Doran, Jennifer and McCabe, Ian and O'Keeffe, Derek", title="Mixed Reality Platforms in Telehealth Delivery: Scoping Review", journal="JMIR Biomed Eng", year="2023", month="Mar", day="24", volume="8", pages="e42709", keywords="augmented reality", keywords="virtual reality", keywords="mixed realities", keywords="telemedicine", keywords="eHealth", keywords="mobile health", keywords="mHealth", abstract="Background: The distinctive features of the digital reality platforms, namely augmented reality (AR), virtual reality (VR), and mixed reality (MR) have extended to medical education, training, simulation, and patient care. Furthermore, this digital reality technology seamlessly merges with information and communication technology creating an enriched telehealth ecosystem. 
This review provides a composite overview of the prospects of telehealth delivered using the MR platform in clinical settings. Objective: This review identifies various clinical applications of high-fidelity digital display technology, namely AR, VR, and MR, delivered using telehealth capabilities. Next, the review focuses on the technical characteristics, hardware, and software technologies used in the composition of AR, VR, and MR in telehealth. Methods: We conducted a scoping review using the methodological framework and reporting design using the PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses Extension for Scoping Reviews) guidelines. Full-length articles in English were obtained from the Embase, PubMed, and Web of Science databases. The search protocol was based on the following keywords and Medical Subject Headings to obtain relevant results: ``augmented reality,'' ``virtual reality,'' ``mixed-reality,'' ``telemedicine,'' ``telehealth,'' and ``digital health.'' A predefined inclusion-exclusion criterion was developed in filtering the obtained results and the final selection of the articles, followed by data extraction and construction of the review. Results: We identified 4407 articles, of which 320 were eligible for full-text screening. A total of 134 full-text articles were included in the review. Telerehabilitation, telementoring, teleconsultation, telemonitoring, telepsychiatry, telesurgery, and telediagnosis were the segments of the telehealth division that explored the use of AR, VR, and MR platforms. Telerehabilitation using VR was the most commonly recurring segment in the included studies. AR and MR has been mainly used for telementoring and teleconsultation. The most important technical features of digital reality technology to emerge with telehealth were virtual environment, exergaming, 3D avatars, telepresence, anchoring annotations, and first-person viewpoint. Different arrangements of technology---3D modeling and viewing tools, communication and streaming platforms, file transfer and sharing platforms, sensors, high-fidelity displays, and controllers---formed the basis of most systems. Conclusions: This review constitutes a recent overview of the evolving digital AR and VR in various clinical applications using the telehealth setup. This combination of telehealth with AR, VR, and MR allows for remote facilitation of clinical expertise and further development of home-based treatment. This review explores the rapidly growing suite of technologies available to users within the digital health sector and examines the opportunities and challenges they present. ", doi="10.2196/42709", url="https://biomedeng.jmir.org/2023/1/e42709", url="http://www.ncbi.nlm.nih.gov/pubmed/38875694" } @Article{info:doi/10.2196/42145, author="Kim, Chan Joo and Saguna, Saguna and {\AA}hlund, Christer", title="Acceptability of a Health Care App With 3 User Interfaces for Older Adults and Their Caregivers: Design and Evaluation Study", journal="JMIR Hum Factors", year="2023", month="Mar", day="8", volume="10", pages="e42145", keywords="Internet of Things", keywords="health monitoring", keywords="older adults", keywords="augmented reality", keywords="user experience", keywords="independent living", keywords="design study", keywords="mobile phone", abstract="Background: The older population needs solutions for independent living and reducing the burden on caregivers while maintaining the quality and dignity of life. 
Objective: The aim of this study was to design, develop, and evaluate an older adult health care app that supports trained caregivers (ie, formal caregivers) and relatives (ie, informal caregivers). We aimed to identify the factors that affect user acceptance of interfaces depending on the user's role. Methods: We designed and developed an app with 3 user interfaces that enable remote sensing of an older adult's daily activities and behaviors. We conducted user evaluations (N=25) with older adults and their formal and informal caregivers to obtain an overall impression of the health care monitoring app in terms of user experience and usability. In our design study, the participants had firsthand experience with our app, followed by a questionnaire and individual interview to express their opinions on the app. Through the interview, we also identified their views on each user interface and interaction modality to identify the relationship between the user's role and their acceptance of a particular interface. The questionnaire answers were statistically analyzed, and we coded the interview answers based on keywords related to a participant's experience, for example, ease of use and usefulness. Results: We obtained overall positive results in the user evaluation of our app regarding key aspects such as efficiency, perspicuity, dependability, stimulation, and novelty, with an average between 1.74 (SD 1.02) and 2.18 (SD 0.93) on a scale of --3.0 to 3.0. The overall impression of our app was favorable, and we identified that ``simple'' and ``intuitive'' were the main factors affecting older adults' and caregivers' preference for the user interface and interaction modality. We also identified a positive user acceptance of the use of augmented reality by 91\% (10/11) of the older adults to share information with their formal and informal caregivers. Conclusions: To address the need for a study to evaluate the user experience and user acceptance by older adults as well as both formal and informal caregivers regarding the user interfaces with multimodal interaction in the context of health monitoring, we designed, developed, and conducted user evaluations with the target user groups. Our results through this design study show important implications for designing future health monitoring apps with multiple interaction modalities and intuitive user interfaces in the older adult health care domain. ", doi="10.2196/42145", url="https://humanfactors.jmir.org/2023/1/e42145", url="http://www.ncbi.nlm.nih.gov/pubmed/36884275" } @Article{info:doi/10.2196/34958, author="O'Connor, Antonia and Sharrad, Kelsey and King, Charmaine and Carson-Chahhoud, Kristin", title="An Augmented Reality Technology to Provide Demonstrative Inhaler Technique Education for Patients With Asthma: Interview Study Among Patients, Health Professionals, and Key Community Stakeholders", journal="JMIR Form Res", year="2023", month="Mar", day="2", volume="7", pages="e34958", keywords="augmented reality", keywords="asthma", keywords="disease management", keywords="smartphone", keywords="inhaler technique", keywords="mobile phone", abstract="Background: Many people with asthma use incorrect inhaler technique, resulting in suboptimal disease management and increased health service use. Novel ways of delivering appropriate instructions are needed. Objective: This study explored stakeholder perspectives on the potential use of augmented reality (AR) technology to improve asthma inhaler technique education. 
Methods: On the basis of existing evidence and resources, an information poster displaying the images of 22 asthma inhaler devices was developed. Using AR technology via a free smartphone app, the poster launched video demonstrations of correct inhaler technique for each device. In total, 21 semistructured, one-on-one interviews with health professionals, people with asthma, and key community stakeholders were conducted, and data were analyzed thematically using the Triandis model of interpersonal behavior. Results: A total of 21 participants were recruited into the study, and data saturation was achieved. People with asthma were confident with inhaler technique (mean score 9.17, SD 1.33, out of 10). However, health professionals and key community stakeholders identified that this perception was misguided (mean 7.25, SD 1.39, and mean 4.5, SD 0.71, for health professionals and key community stakeholders, respectively) and facilitates persistent incorrect inhaler use and suboptimal disease management. Delivering inhaler technique education using AR was favored by all participants (21/21, 100\%), particularly around ease of use, with the ability to visually display inhaler techniques for each device. There was a strongly held belief that the technology has the capacity for improving inhaler technique across all participant groups (mean 9.25, SD 0.89, for participants; mean 9.83, SD 0.41, for health professionals; and mean 9.5, SD 0.71, for key community stakeholders). However, all participants (21/21, 100\%) identified some barriers, particularly regarding access and appropriateness of AR for older people. Conclusions: AR technology may be a novel means to address poor inhaler technique among certain cohorts of patients with asthma and serve as a prompt for health professionals to initiate review of inhaler devices. A randomized controlled trial design is needed to evaluate the efficacy of this technology for use in the clinical care setting. ", doi="10.2196/34958", url="https://formative.jmir.org/2023/1/e34958", url="http://www.ncbi.nlm.nih.gov/pubmed/36862496" } @Article{info:doi/10.2196/44161, author="Zhang, Zhan and Bai, Enze and Joy, Karen and Ghelaa, Naressh Partth and Adelgais, Kathleen and Ozkaynak, Mustafa", title="Smart Glasses for Supporting Distributed Care Work: Systematic Review", journal="JMIR Med Inform", year="2023", month="Feb", day="28", volume="11", pages="e44161", keywords="smart glass", keywords="care coordination", keywords="telemedicine", keywords="distributed teamwork", keywords="mobile phone", abstract="Background: Over the past 2 decades, various desktop and mobile telemedicine systems have been developed to support communication and care coordination among distributed medical teams. However, in the hands-busy care environment, such technologies could become cumbersome because they require medical professionals to manually operate them. Smart glasses have been gaining momentum because of their advantages in enabling hands-free operation and see-what-I-see video-based consultation. Previous research has tested this novel technology in different health care settings. Objective: The aim of this study was to review how smart glasses were designed, used, and evaluated as a telemedicine tool to support distributed care coordination and communication, as well as highlight the potential benefits and limitations regarding medical professionals' use of smart glasses in practice. 
Methods: We conducted a literature search in 6 databases that cover research within both health care and computer science domains. We used the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) methodology to review articles. A total of 5865 articles were retrieved and screened by 3 researchers, with 21 (0.36\%) articles included for in-depth analysis. Results: All of the reviewed articles (21/21, 100\%) used off-the-shelf smart glass device and videoconferencing software, which had a high level of technology readiness for real-world use and deployment in care settings. The common system features used and evaluated in these studies included video and audio streaming, annotation, augmented reality, and hands-free interactions. These studies focused on evaluating the technical feasibility, effectiveness, and user experience of smart glasses. Although the smart glass technology has demonstrated numerous benefits and high levels of user acceptance, the reviewed studies noted a variety of barriers to successful adoption of this novel technology in actual care settings, including technical limitations, human factors and ergonomics, privacy and security issues, and organizational challenges. Conclusions: User-centered system design, improved hardware performance, and software reliability are needed to realize the potential of smart glasses. More research is needed to examine and evaluate medical professionals' needs, preferences, and perceptions, as well as elucidate how smart glasses affect the clinical workflow in complex care environments. Our findings inform the design, implementation, and evaluation of smart glasses that will improve organizational and patient outcomes. ", doi="10.2196/44161", url="https://medinform.jmir.org/2023/1/e44161", url="http://www.ncbi.nlm.nih.gov/pubmed/36853760" } @Article{info:doi/10.2196/40651, author="Stammler, Britta and Flammer, Kathrin and Schuster, Thomas and Lambert, Marian and Karnath, Hans-Otto", title="Negami: An Augmented Reality App for the Treatment of Spatial Neglect After Stroke", journal="JMIR Serious Games", year="2023", month="Feb", day="27", volume="11", pages="e40651", keywords="spatial neglect", keywords="gamification", keywords="augmented reality", keywords="visual exploration training", keywords="stroke rehabilitation", keywords="serious games", keywords="rehabilitation", keywords="stroke", abstract="Background: A widely applied and effective rehabilitation method for patients experiencing spatial neglect after a stroke is ``visual exploration training.'' Patients improve their ipsilesional bias of attention and orientation by training exploration movements and search strategies toward the contralesional side of space. In this context, gamification can have a positive influence on motivation for treatment and thus on the success of treatment. In contrast to virtual reality applications, treatment enhancements through augmented reality (AR) have not yet been investigated, although they offer some advantages over virtual reality. Objective: This study aimed to develop an AR-based app (Negami) for the treatment of spatial neglect that combines visual exploration training with active, contralesionally oriented rotation of the eyes, head, and trunk. Methods: The app inserts a virtual element (origami bird) into the real space surrounding the patient, which the patient explores with the camera of a tablet. 
Subjective reports from healthy elderly participants (n=10) and patients with spatial neglect after stroke (n=10) who trained with the new Negami app were analyzed. Usability, side effects, and game experience were assessed by various questionnaires. Results: Training at the highest defined difficulty level was perceived as differently challenging but not as frustrating by the group of healthy elderly participants. The app was rated with high usability, hardly any side effects, high motivation, and entertainment. The group of patients with spatial neglect after stroke consistently evaluated the app positively on the dimensions of motivation, satisfaction, and fun. Conclusions: The Negami app represents a promising extension by adding AR to traditional exploration training for spatial neglect. Through participants' natural interaction with the physical surrounding environment during playful tasks, side effects as symptoms of cybersickness are minimized and patients' motivation appeared to markedly increase. The use of AR in cognitive rehabilitation programs and the treatment of spatial neglect seems promising and should receive further investigation. ", doi="10.2196/40651", url="https://games.jmir.org/2023/1/e40651", url="http://www.ncbi.nlm.nih.gov/pubmed/36848215" } @Article{info:doi/10.2196/40582, author="Shu, Sara and Woo, P. Benjamin K.", title="Pioneering the Metaverse: The Role of the Metaverse in an Aging Population", journal="JMIR Aging", year="2023", month="Jan", day="20", volume="6", pages="e40582", keywords="metaverse", keywords="older adult", keywords="aging in place", keywords="dementia", keywords="gerontology", keywords="geriatric", keywords="digital health", keywords="digital technology", keywords="computer generated", keywords="artificial intelligence", keywords="virtual reality", keywords="mixed reality", keywords="augmented reality", keywords="aging", keywords="mental health", doi="10.2196/40582", url="https://aging.jmir.org/2023/1/e40582", url="http://www.ncbi.nlm.nih.gov/pubmed/36662547" } @Article{info:doi/10.2196/42040, author="Mai, Hang-Nga and Dam, Viet Van and Lee, Du-Hyeong", title="Accuracy of Augmented Reality--Assisted Navigation in Dental Implant Surgery: Systematic Review and Meta-analysis", journal="J Med Internet Res", year="2023", month="Jan", day="4", volume="25", pages="e42040", keywords="augmented reality", keywords="accuracy", keywords="computer-guided surgery", keywords="dental implants", keywords="systematic review", keywords="meta-analysis", abstract="Background: The novel concept of immersive 3D augmented reality (AR) surgical navigation has recently been introduced in the medical field. This method allows surgeons to directly focus on the surgical objective without having to look at a separate monitor. In the dental field, the recently developed AR-assisted dental implant navigation system (AR navigation), which uses innovative image technology to directly visualize and track a presurgical plan over an actual surgical site, has attracted great interest. Objective: This study is the first systematic review and meta-analysis study that aimed to assess the accuracy of dental implants placed by AR navigation and compare it with that of the widely used implant placement methods, including the freehand method (FH), template-based static guidance (TG), and conventional navigation (CN). 
Methods: Individual search strategies were used in PubMed (MEDLINE), Scopus, ScienceDirect, Cochrane Library, and Google Scholar to search for articles published until March 21, 2022. This study was performed in accordance with the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) guidelines and registered in the International Prospective Register of Systematic Reviews (PROSPERO) database. Peer-reviewed journal articles evaluating the positional deviations of dental implants placed using AR-assisted implant navigation systems were included. Cohen d statistical power analysis was used to investigate the effect size estimate and CIs of standardized mean differences (SMDs) between data sets. Results: Among the 425 articles retrieved, 15 articles were considered eligible for narrative review, 8 articles were considered for single-arm meta-analysis, and 4 were included in a 2-arm meta-analysis. The mean lateral, global, depth, and angular deviations of the dental implant placed using AR navigation were 0.90 (95\% CI 0.78-1.02) mm, 1.18 (95\% CI 0.95-1.41) mm, 0.78 (95\% CI 0.48-1.08) mm, and 3.96{\textdegree} (95\% CI 3.45{\textdegree}-4.48{\textdegree}), respectively. The accuracy of AR navigation was significantly higher than that of the FH method (SMD=--1.01; 95\% CI --1.47 to --0.55; P<.001) and CN method (SMD=--0.46; 95\% CI --0.64 to --0.29; P<.001). However, the accuracies of the AR navigation and TG methods were similar (SMD=0.06; 95\% CI --0.62 to 0.74; P=.73). Conclusions: The positional deviations of AR-navigated implant placements were within the safety zone, suggesting clinically acceptable accuracy of the AR navigation method. Moreover, the accuracy of AR implant navigation was comparable with that of the highly recommended dental implant--guided surgery method, TG, and superior to that of the conventional FH and CN methods. This review highlights the possibility of using AR navigation as an effective and accurate immersive surgical guide for dental implant placement. ", doi="10.2196/42040", url="https://www.jmir.org/2023/1/e42040", url="http://www.ncbi.nlm.nih.gov/pubmed/36598798" } @Article{info:doi/10.2196/36695, author="Xu, Nuo and Chen, Sijing and Liu, Yan and Jing, Yuewen and Gu, Ping", title="The Effects of Virtual Reality in Maternal Delivery: Systematic Review and Meta-analysis", journal="JMIR Serious Games", year="2022", month="Nov", day="23", volume="10", number="4", pages="e36695", keywords="virtual reality technology", keywords="delivery", keywords="labor pain", keywords="anxiety", keywords="meta-analysis", keywords="systematic review", keywords="pain", keywords="pregnancy", keywords="virtual reality", keywords="maternity", keywords="labor", keywords="technology", keywords="pregnant women", keywords="review", keywords="childbirth", keywords="mental health", abstract="Background: Extreme labor pain has negative effects; pharmacologic analgesic modalities are effective but are accompanied by adverse effects. Virtual reality (VR) works as a distracting nonpharmacologic intervention for pain and anxiety relief; however, the effects of VR use in laboring women are unknown. Objective: Our study aimed to determine the safety and effectiveness of VR technology during labor and delivery and investigate whether it impacts labor and patient satisfaction. 
Methods: In all, 7 databases (PubMed, Embase, Web of Science, the Cochrane Library, CINAHL, China National Knowledge Infrastructure, and Wan-Fang Database) were systematically searched for randomized controlled trials of VR use in pregnancy and childbirth from the time of database construction until November 24, 2021. Two researchers extracted data and evaluated study quality using the Cochrane Risk of Bias tool 2.0. Outcome measures were labor pain, anxiety, duration, satisfaction, and adverse events. Meta-analyses were performed where possible. Results: A total of 12 studies with 1095 participants were included, of which 1 and 11 studies were rated as ``Low risk'' and ``Some concerns'' for risk of bias, respectively. Of the 12 studies, 11 reported labor pain, 7 reported labor anxiety, and 4 reported labor duration. Meta-analysis revealed that VR use could relieve pain during labor (mean difference --1.81, 95\% CI --2.04 to --1.57; P<.001) and the active period (standardized mean difference [SMD] --0.41, 95\% CI --0.68 to --0.14; P=.003); reduce anxiety (SMD --1.39, 95\% CI --1.99 to --0.78; P<.001); and improve satisfaction with delivery (relative risk 1.32, 95\% CI 1.10-1.59; P=.003). The effects of VR on the duration of the first (SMD --1.12, 95\% CI --2.38 to 0.13; P=.08) and second (SMD --0.22, 95\% CI --0.67 to 0.24; P=.35) stages of labor were not statistically significant. Conclusions: VR is safe and effective in relieving maternal labor pain and anxiety; however, due to the heterogeneity among studies conducted to date, more rigorous, large-scale, and standardized randomized controlled trials are required to provide a higher-quality evidence base for the use of VR technology in maternal labor, with the aim of improving experience and outcomes. Trial Registration: PROSPERO CRD42021295410; https://www.crd.york.ac.uk/prospero/display\_record.php?RecordID=295410 ", doi="10.2196/36695", url="https://games.jmir.org/2022/4/e36695", url="http://www.ncbi.nlm.nih.gov/pubmed/36416881" } @Article{info:doi/10.2196/34501, author="Eves, Joshua and Sudarsanam, Abhilash and Shalhoub, Joseph and Amiras, Dimitri", title="Augmented Reality in Vascular and Endovascular Surgery: Scoping Review", journal="JMIR Serious Games", year="2022", month="Sep", day="23", volume="10", number="3", pages="e34501", keywords="augmented reality", keywords="surgery", keywords="vascular", keywords="endovascular", keywords="head-mounted display", keywords="mobile phone", abstract="Background: Technological advances have transformed vascular intervention in recent decades. In particular, improvements in imaging and data processing have allowed for the development of increasingly complex endovascular and hybrid interventions. Augmented reality (AR) is a subject of growing interest in surgery, with the potential to improve clinicians' understanding of 3D anatomy and aid in the processing of real-time information. This study hopes to elucidate the potential impact of AR technology in the rapidly evolving fields of vascular and endovascular surgery. Objective: The aim of this review is to summarize the fundamental concepts of AR technologies and conduct a scoping review of the impact of AR and mixed reality in vascular and endovascular surgery. Methods: A systematic search of MEDLINE, Scopus, and Embase was performed in accordance with the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) guidelines. All studies written in English from inception until January 8, 2021, were included in the search. 
Combinations of the following keywords were used in the systematic search string: (``augmented reality'' OR ``hololens'' OR ``image overlay'' OR ``daqri'' OR ``magic leap'' OR ``immersive reality'' OR ``extended reality'' OR ``mixed reality'' OR ``head mounted display'') AND (``vascular surgery'' OR ``endovascular''). Studies were selected through a blinded process between 2 investigators (JE and AS) and assessed using data quality tools. Results: AR technologies have had a number of applications in vascular and endovascular surgery. Most studies (22/32, 69\%) used 3D imaging of computed tomography angiogram--derived images of vascular anatomy to augment clinicians' anatomical understanding during procedures. A wide range of AR technologies were used, with heads up fusion imaging and AR head-mounted displays being the most commonly applied clinically. AR applications included guiding open, robotic, and endovascular surgery while minimizing dissection, improving procedural times, and reducing radiation and contrast exposure. Conclusions: AR has shown promising developments in the field of vascular and endovascular surgery, with potential benefits to surgeons and patients alike. These include reductions in patient risk and operating times as well as in contrast and radiation exposure for radiological interventions. Further technological advances are required to overcome current limitations, including processing capacity and vascular deformation by instrumentation. ", doi="10.2196/34501", url="https://games.jmir.org/2022/3/e34501", url="http://www.ncbi.nlm.nih.gov/pubmed/36149736" } @Article{info:doi/10.2196/36850, author="Jeong, Heejin and Bayro, Allison and Umesh, Patipati Sai and Mamgain, Kaushal and Lee, Moontae", title="Social Media Users' Perceptions of a Wearable Mixed Reality Headset During the COVID-19 Pandemic: Aspect-Based Sentiment Analysis", journal="JMIR Serious Games", year="2022", month="Aug", day="4", volume="10", number="3", pages="e36850", keywords="HoloLens 2", keywords="sentiment analysis", keywords="natural language processing, Twitter", keywords="COVID-19", keywords="usability evaluation", abstract="Background: Mixed reality (MR) devices provide real-time environments for physical-digital interactions across many domains. Owing to the unprecedented COVID-19 pandemic, MR technologies have supported many new use cases in the health care industry, enabling social distancing practices to minimize the risk of contact and transmission. Despite their novelty and increasing popularity, public evaluations are sparse and often rely on social interactions among users, developers, researchers, and potential buyers. Objective: The purpose of this study is to use aspect-based sentiment analysis to explore changes in sentiment during the onset of the COVID-19 pandemic as new use cases emerged in the health care industry; to characterize net insights for MR developers, researchers, and users; and to analyze the features of HoloLens 2 (Microsoft Corporation) that are helpful for certain fields and purposes. Methods: To investigate the user sentiment, we collected 8492 tweets on a wearable MR headset, HoloLens 2, during the initial 10 months since its release in late 2019, coinciding with the onset of the pandemic. Human annotators rated the individual tweets as positive, negative, neutral, or inconclusive. 
Furthermore, by hiring an interannotator to ensure agreements between the annotators, we used various word vector representations to measure the impact of specific words on sentiment ratings. Following the sentiment classification for each tweet, we trained a model for sentiment analysis via supervised learning. Results: The results of our sentiment analysis showed that the bag-of-words tokenizing method using a random forest supervised learning approach produced the highest accuracy of the test set at 81.29\%. Furthermore, the results showed an apparent change in sentiment during the COVID-19 pandemic period. During the onset of the pandemic, consumer goods were severely affected, which aligns with a drop in both positive and negative sentiment. Following this, there is a sudden spike in positive sentiment, hypothesized to be caused by the new use cases of the device in health care education and training. This pandemic also aligns with drastic changes in the increased number of practical insights for MR developers, researchers, and users and positive net sentiments toward the HoloLens 2 characteristics. Conclusions: Our approach suggests a simple yet effective way to survey public opinion about new hardware devices quickly. The findings of this study contribute to a holistic understanding of public perception and acceptance of MR technologies during the COVID-19 pandemic and highlight several new implementations of HoloLens 2 in health care. We hope that these findings will inspire new use cases and technological features. ", doi="10.2196/36850", url="https://games.jmir.org/2022/3/e36850", url="http://www.ncbi.nlm.nih.gov/pubmed/35708916" } @Article{info:doi/10.2196/38433, author="Heo, Sejin and Moon, Suhyeon and Kim, Minha and Park, Minsu and Cha, Chul Won and Son, Hi Meong", title="An Augmented Reality--Based Guide for Mechanical Ventilator Setup: Prospective Randomized Pilot Trial", journal="JMIR Serious Games", year="2022", month="Jul", day="22", volume="10", number="3", pages="e38433", keywords="augmented reality", keywords="mechanical ventilation education", keywords="medical education", keywords="critical care", keywords="medical training", keywords="virtual reality", keywords="virtual education", keywords="nurse", keywords="nursing education", keywords="nursing", keywords="health care professional", keywords="learning platform", keywords="digital learning", keywords="digital health", abstract="Background: Recently, the demand for mechanical ventilation (MV) has increased with the COVID-19 pandemic; however, the conventional approaches to MV training are resource intensive and require on-site training. Consequently, the need for independent learning platforms with remote assistance in institutions without resources has surged. Objective: This study aimed to determine the feasibility and effectiveness of an augmented reality (AR)--based self-learning platform for novices to set up a ventilator without on-site assistance. Methods: This prospective randomized controlled pilot study was conducted at Samsung Medical Center, Korea, from January to February 2022. Nurses with no prior experience of MV or AR were enrolled. We randomized the participants into 2 groups: manual and AR groups. Participants in the manual group used a printed manual and made a phone call for assistance, whereas participants in the AR group were guided by AR-based instructions and requested assistance with the head-mounted display. 
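As a hedged illustration of the bag-of-words tokenization plus random forest classification pipeline described in the Jeong et al sentiment analysis abstract above, the sketch below shows a minimal scikit-learn version; the toy texts, labels, and split parameters are hypothetical stand-ins, not the study's Twitter data or code.

```python
# Illustrative sketch of a bag-of-words + random forest sentiment pipeline of the
# kind described in the Jeong et al abstract above; the tiny labeled sample below
# is hypothetical, not the study's tweet corpus.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split

texts = ["love the hololens 2 display", "tracking keeps drifting, frustrating",
         "great for remote training demos", "battery life is disappointing",
         "setup was smooth and intuitive", "too heavy to wear for long"]
labels = ["positive", "negative", "positive", "negative", "positive", "negative"]

X_train, X_test, y_train, y_test = train_test_split(
    texts, labels, test_size=0.33, random_state=0)

# Bag-of-words tokenization feeding a random forest classifier.
model = make_pipeline(CountVectorizer(),
                      RandomForestClassifier(n_estimators=100, random_state=0))
model.fit(X_train, y_train)
print("held-out accuracy:", model.score(X_test, y_test))
```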
We compared the overall score of the procedure, required level of assistance, and user experience between the groups. Results: In total, 30 participants completed the entire procedure with or without remote assistance. Fewer participants requested assistance in the AR group compared to the manual group (7/15, 46.7\% vs 14/15, 93.3\%; P=.02). The number of steps that required assistance was also lower in the AR group compared to the manual group (n=13 vs n=33; P=.004). The AR group had a higher rating in predeveloped questions for confidence (median 3, IQR 2.50-4.00 vs median 2, IQR 2.00-3.00; P=.01), suitability of method (median 4, IQR 4.00-5.00 vs median 3, IQR 3.00-3.50; P=.01), and whether they intended to recommend AR systems to others (median 4, IQR 3.00-5.00 vs median 3, IQR 2.00-3.00; P=.002). Conclusions: AR-based instructions to set up a mechanical ventilator were feasible for novices who had no prior experience with MV or AR. Additionally, participants in the AR group required less assistance compared with those in the manual group, resulting in higher confidence after training. Trial Registration: ClinicalTrials.gov NCT05446896; https://beta.clinicaltrials.gov/study/NCT05446896 ", doi="10.2196/38433", url="https://games.jmir.org/2022/3/e38433", url="http://www.ncbi.nlm.nih.gov/pubmed/35867382" } @Article{info:doi/10.2196/32715, author="Baashar, Yahia and Alkawsi, Gamal and Ahmad, Wan Wan Nooraishya and Alhussian, Hitham and Alwadain, Ayed and Capretz, Fernando Luiz and Babiker, Areej and Alghail, Adnan", title="Effectiveness of Using Augmented Reality for Training in the Medical Professions: Meta-analysis", journal="JMIR Serious Games", year="2022", month="Jul", day="5", volume="10", number="3", pages="e32715", keywords="augmented reality", keywords="medical", keywords="training", keywords="virtual", keywords="meta-analysis", abstract="Background: Augmented reality (AR) is an interactive technology that uses persuasive digital data and real-world surroundings to expand the user's reality, wherein objects are produced by various computer applications. It constitutes a novel advancement in medical care, education, and training. Objective: The aim of this work was to assess how effective AR is in training medical students when compared to other educational methods in terms of skills, knowledge, confidence, performance time, and satisfaction. Methods: We performed a meta-analysis on the effectiveness of AR in medical training that was constructed by using the Cochrane methodology. A web-based literature search was performed by using the Cochrane Library, Web of Science, PubMed, and Embase databases to find studies that recorded the effect of AR in medical training up to April 2021. The quality of the selected studies was assessed by following the Cochrane criteria for risk of bias evaluations. Results: In total, 13 studies with a total of 654 participants were included in the meta-analysis. The findings showed that using AR in training can improve participants' performance time (I2=99.9\%; P<.001), confidence (I2=97.7\%; P=.02), and satisfaction (I2=99.8\%; P=.006) more than what occurs under control conditions. Further, AR did not have any effect on the participants' knowledge (I2=99.4\%; P=.90) and skills (I2=97.5\%; P=.10). The meta-regression plot shows that there has been an increase in the number of articles discussing AR over the years and that there is no publication bias in the studies used for the meta-analysis. 
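The Baashar et al meta-analysis above summarizes heterogeneity with I2 statistics for each pooled outcome. Purely as an illustrative sketch with made-up effect sizes and variances (not data from that review), Cochran's Q and I2 can be derived from per-study estimates using inverse-variance weights:

```python
# Illustrative sketch (hypothetical data): Cochran's Q and the I^2 heterogeneity
# statistic from per-study effect sizes and variances, using standard
# inverse-variance (fixed-effect) weighting.
import numpy as np

def heterogeneity(effect_sizes, variances):
    """Return Cochran's Q and I^2 (%) for k independent study estimates."""
    y = np.asarray(effect_sizes, dtype=float)
    v = np.asarray(variances, dtype=float)
    w = 1.0 / v                          # inverse-variance weights
    pooled = np.sum(w * y) / np.sum(w)   # fixed-effect pooled estimate
    q = np.sum(w * (y - pooled) ** 2)    # Cochran's Q
    df = len(y) - 1
    i2 = max(0.0, (q - df) / q) * 100 if q > 0 else 0.0
    return q, i2

# Hypothetical example: 5 standardized mean differences and their variances.
q, i2 = heterogeneity([0.4, 0.9, 0.2, 1.1, 0.6], [0.05, 0.08, 0.04, 0.10, 0.06])
print(f"Q = {q:.2f}, I^2 = {i2:.1f}%")
```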
Conclusions: The findings of this work suggest that AR can effectively improve performance time, satisfaction, and confidence in medical training but is not very effective in areas such as knowledge and skill. Therefore, more AR technologies should be implemented in the field of medical training and education. However, to confirm these findings, more meticulous research with more participants is needed. ", doi="10.2196/32715", url="https://games.jmir.org/2022/3/e32715", url="http://www.ncbi.nlm.nih.gov/pubmed/35787488" } @Article{info:doi/10.2196/34781, author="Puladi, Behrus and Ooms, Mark and Bellgardt, Martin and Cesov, Mark and Lipprandt, Myriam and Raith, Stefan and Peters, Florian and M{\"o}hlhenrich, Christian Stephan and Prescher, Andreas and H{\"o}lzle, Frank and Kuhlen, Wolfgang Torsten and Modabber, Ali", title="Augmented Reality-Based Surgery on the Human Cadaver Using a New Generation of Optical Head-Mounted Displays: Development and Feasibility Study", journal="JMIR Serious Games", year="2022", month="Apr", day="25", volume="10", number="2", pages="e34781", keywords="digital health in surgery", keywords="surgical technique", keywords="surgical training", keywords="computer-assisted surgery", keywords="optical see-through head-mounted display", keywords="HoloLens", keywords="surgical navigation", keywords="medical regulation", keywords="open-source", keywords="AR", keywords="augmented reality", keywords="surgery", keywords="surgeon", keywords="cadaver", keywords="serious game", keywords="head-mounted display", abstract="Background: Although nearly one-third of the world's disease burden requires surgical care, only a small proportion of digital health applications are directly used in the surgical field. In the coming decades, the application of augmented reality (AR) with a new generation of optical-see-through head-mounted displays (OST-HMDs) like the HoloLens (Microsoft Corp) has the potential to bring digital health into the surgical field. However, for the application to be performed on a living person, proof of performance must first be provided due to regulatory requirements. In this regard, cadaver studies could provide initial evidence. Objective: The goal of the research was to develop an open-source system for AR-based surgery on human cadavers using freely available technologies. Methods: We tested our system using an easy-to-understand scenario in which fractured zygomatic arches of the face had to be repositioned with visual and auditory feedback to the investigators using a HoloLens. Results were verified with postoperative imaging and assessed in a blinded fashion by 2 investigators. The developed system and scenario were qualitatively evaluated by consensus interview and individual questionnaires. Results: The development and implementation of our system was feasible and could be realized in the course of a cadaver study. The AR system was found helpful by the investigators for spatial perception in addition to the combination of visual as well as auditory feedback. The surgical end point could be determined metrically as well as by assessment. Conclusions: The development and application of an AR-based surgical system using freely available technologies to perform OST-HMD--guided surgical procedures in cadavers is feasible. Cadaver studies are suitable for OST-HMD--guided interventions to measure a surgical end point and provide an initial data foundation for future clinical trials. 
The availability of free systems for researchers could be helpful for a possible translation process from digital health to AR-based surgery using OST-HMDs in the operating theater via cadaver studies. ", doi="10.2196/34781", url="https://games.jmir.org/2022/2/e34781", url="http://www.ncbi.nlm.nih.gov/pubmed/35468090" } @Article{info:doi/10.2196/18222, author="Andrews, Anya", title="Integration of Augmented Reality and Brain-Computer Interface Technologies for Health Care Applications: Exploratory and Prototyping Study", journal="JMIR Form Res", year="2022", month="Apr", day="21", volume="6", number="4", pages="e18222", keywords="digital health", keywords="augmented reality", keywords="brain-computer interface", keywords="health professional education", keywords="clinical performance support", keywords="interprofessional teamwork", keywords="patient education", keywords="mHealth", keywords="mobile health", keywords="technology integration", abstract="Background: Augmented reality (AR) and brain-computer interface (BCI) are promising technologies that have a tremendous potential to revolutionize health care. While there has been a growing interest in these technologies for medical applications in the recent years, the combined use of AR and BCI remains a fairly unexplored area that offers significant opportunities for improving health care professional education and clinical practice. This paper describes a recent study to explore the integration of AR and BCI technologies for health care applications. Objective: The described effort aims to advance an understanding of how AR and BCI technologies can effectively work together to transform modern health care practice by providing new mechanisms to improve patient and provider learning, communication, and shared decision-making. Methods: The study methods included an environmental scan of AR and BCI technologies currently used in health care, a use case analysis for a combined AR-BCI capability, and development of an integrated AR-BCI prototype solution for health care applications. Results: The study resulted in a novel interface technology solution that enables interoperability between consumer-grade wearable AR and BCI devices and provides the users with an ability to control digital objects in augmented reality using neural commands. The article discusses this novel solution within the context of practical digital health use cases developed during the course of the study where the combined AR and BCI technologies are anticipated to produce the most impact. Conclusions: As one of the pioneering efforts in the area of AR and BCI integration, the study presents a practical implementation pathway for AR-BCI integration and provides directions for future research and innovation in this area. 
", doi="10.2196/18222", url="https://formative.jmir.org/2022/4/e18222", url="http://www.ncbi.nlm.nih.gov/pubmed/35451963" } @Article{info:doi/10.2196/29594, author="Tudor Car, Lorainne and Kyaw, Myint Bhone and Teo, Andrew and Fox, Erlikh Tatiana and Vimalesvaran, Sunitha and Apfelbacher, Christian and Kemp, Sandra and Chavannes, Niels", title="Outcomes, Measurement Instruments, and Their Validity Evidence in Randomized Controlled Trials on Virtual, Augmented, and Mixed Reality in Undergraduate Medical Education: Systematic Mapping Review", journal="JMIR Serious Games", year="2022", month="Apr", day="13", volume="10", number="2", pages="e29594", keywords="virtual reality", keywords="augmented reality", keywords="mixed reality", keywords="outcomes", keywords="extended reality", keywords="digital education", keywords="randomized controlled trials", keywords="medical education", keywords="measurement instruments", abstract="Background: Extended reality, which encompasses virtual reality (VR), augmented reality (AR), and mixed reality (MR), is increasingly used in medical education. Studies assessing the effectiveness of these new educational modalities should measure relevant outcomes using outcome measurement tools with validity evidence. Objective: Our aim is to determine the choice of outcomes, measurement instruments, and the use of measurement instruments with validity evidence in randomized controlled trials (RCTs) on the effectiveness of VR, AR, and MR in medical student education. Methods: We conducted a systematic mapping review. We searched 7 major bibliographic databases from January 1990 to April 2020, and 2 reviewers screened the citations and extracted data independently from the included studies. We report our findings in line with the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) guidelines. Results: Of the 126 retrieved RCTs, 115 (91.3\%) were on VR and 11 (8.7\%) were on AR. No RCT on MR in medical student education was found. Of the 115 studies on VR, 64 (55.6\%) were on VR simulators, 30 (26.1\%) on screen-based VR, 9 (7.8\%) on VR patient simulations, and 12 (10.4\%) on VR serious games. Most studies reported only a single outcome and immediate postintervention assessment data. Skills outcome was the most common outcome reported in studies on VR simulators (97\%), VR patient simulations (100\%), and AR (73\%). Knowledge was the most common outcome reported in studies on screen-based VR (80\%) and VR serious games (58\%). Less common outcomes included participants' attitudes, satisfaction, cognitive or mental load, learning efficacy, engagement or self-efficacy beliefs, emotional state, competency developed, and patient outcomes. At least one form of validity evidence was found in approximately half of the studies on VR simulators (55\%), VR patient simulations (56\%), VR serious games (58\%), and AR (55\%) and in a quarter of the studies on screen-based VR (27\%). Most studies used assessment methods that were implemented in a nondigital format, such as paper-based written exercises or in-person assessments where examiners observed performance (72\%). Conclusions: RCTs on VR and AR in medical education report a restricted range of outcomes, mostly skills and knowledge. The studies largely report immediate postintervention outcome data and use assessment methods that are in a nondigital format. 
Future RCTs should include a broader set of outcomes, report on the validity evidence of the measurement instruments used, and explore the use of assessments that are implemented digitally. ", doi="10.2196/29594", url="https://games.jmir.org/2022/2/e29594", url="http://www.ncbi.nlm.nih.gov/pubmed/35416789" } @Article{info:doi/10.2196/33476, author="Han, Gyu Ul and Lee, Jung-Yup and Kim, Ga-Young and Jo, Mini and Lee, Jaeseong and Bang, Ho Kyoung and Cho, Sang Young and Hong, Hwa Sung and Moon, Joon Il", title="Real-World Effectiveness of Wearable Augmented Reality Device for Patients With Hearing Loss: Prospective Study", journal="JMIR Mhealth Uhealth", year="2022", month="Mar", day="23", volume="10", number="3", pages="e33476", keywords="hearing loss", keywords="hearing aids", keywords="personal sound amplification product", keywords="wearable augmented reality device", abstract="Background: Hearing loss limits communication and social activity, and hearing aids (HAs) are an efficient rehabilitative option for improving oral communication and speech comprehension, as well as the psychosocial comfort of people with hearing loss. To overcome this problem, over-the-counter amplification devices including personal sound amplification products and wearable augmented reality devices (WARDs) have been introduced. Objective: This study aimed to evaluate the clinical effectiveness of WARDs for patients with mild to moderate hearing loss. Methods: A total of 40 patients (18 men and 22 women) with mild to moderate hearing loss were enrolled prospectively in this study. All participants were instructed to wear a WARD, Galaxy Buds Pro (Samsung Electronics), at least 4 hours a day for 2 weeks, for amplifying ambient sounds. Questionnaires including the Korean version of the abbreviated profile of hearing aid benefit (K-APHAB) and the Korean adaptation of the international outcome inventory for hearing aids (K-IOI-HA) were used to assess personal satisfaction in all participants. Audiologic tests, including sound field audiometry, sound field word recognition score (WRS), and the Korean version of hearing in noise test (K-HINT), were administered to 14 of 40 patients. The tests were performed under two conditions: unaided and aided with WARDs. Results: The mean age of the participants was 55.4 (SD 10.7) years. After 2 weeks of the field trial, participants demonstrated a benefit of WARDs on the K-APHAB. Scores of 3 subscales of ease of communication, reverberation, and background noise were improved significantly (P<.001). However, scores regarding aversiveness were worse under the aided condition (P<.001). K-IOI-HA findings indicated high user satisfaction after the 2-week field trial. On audiologic evaluation, the K-HINT did not show significant differences between unaided and aided conditions (P=.97). However, the hearing threshold on sound field audiometry (P=.001) and the WRS (P=.002) showed significant improvements under the aided condition. Conclusions: WARDs can be beneficial for patients with mild to moderate hearing loss as a cost-effective alternative to conventional hearing aids. 
", doi="10.2196/33476", url="https://mhealth.jmir.org/2022/3/e33476", url="http://www.ncbi.nlm.nih.gov/pubmed/35320113" } @Article{info:doi/10.2196/28595, author="Ricci, Serena and Calandrino, Andrea and Borgonovo, Giacomo and Chirico, Marco and Casadio, Maura", title="Viewpoint: Virtual and Augmented Reality in Basic and Advanced Life Support Training", journal="JMIR Serious Games", year="2022", month="Mar", day="23", volume="10", number="1", pages="e28595", keywords="basic and advanced life support", keywords="first aid", keywords="cardiopulmonary resuscitation", keywords="emergency", keywords="training", keywords="simulation training", keywords="medical simulation", keywords="healthcare simulation", keywords="virtual reality", keywords="augmented reality", doi="10.2196/28595", url="https://games.jmir.org/2022/1/e28595", url="http://www.ncbi.nlm.nih.gov/pubmed/35319477" } @Article{info:doi/10.2196/30883, author="Zhang, Zhan and Joy, Karen and Harris, Richard and Ozkaynak, Mustafa and Adelgais, Kathleen and Munjal, Kevin", title="Applications and User Perceptions of Smart Glasses in Emergency Medical Services: Semistructured Interview Study", journal="JMIR Hum Factors", year="2022", month="Feb", day="28", volume="9", number="1", pages="e30883", keywords="smart glasses", keywords="hands-free technologies", keywords="emergency medical services", keywords="user studies", keywords="mobile phone", abstract="Background: Smart glasses have been gaining momentum as a novel technology because of their advantages in enabling hands-free operation and see-what-I-see remote consultation. Researchers have primarily evaluated this technology in hospital settings; however, limited research has investigated its application in prehospital operations. Objective: The aim of this study is to understand the potential of smart glasses to support the work practices of prehospital providers, such as emergency medical services (EMS) personnel. Methods: We conducted semistructured interviews with 13 EMS providers recruited from 4 hospital-based EMS agencies in an urban area in the east coast region of the United States. The interview questions covered EMS workflow, challenges encountered, technology needs, and users' perceptions of smart glasses in supporting daily EMS work. During the interviews, we demonstrated a system prototype to elicit more accurate and comprehensive insights regarding smart glasses. Interviews were transcribed verbatim and analyzed using the open coding technique. Results: We identified four potential application areas for smart glasses in EMS: enhancing teleconsultation between distributed prehospital and hospital providers, semiautomating patient data collection and documentation in real time, supporting decision-making and situation awareness, and augmenting quality assurance and training. Compared with the built-in touch pad, voice commands and hand gestures were indicated as the most preferred and suitable interaction mechanisms. EMS providers expressed positive attitudes toward using smart glasses during prehospital encounters. However, several potential barriers and user concerns need to be considered and addressed before implementing and deploying smart glasses in EMS practice. They are related to hardware limitations, human factors, reliability, workflow, interoperability, and privacy. Conclusions: Smart glasses can be a suitable technological means for supporting EMS work. 
We conclude this paper by discussing several design considerations for realizing the full potential of this hands-free technology. ", doi="10.2196/30883", url="https://humanfactors.jmir.org/2022/1/e30883", url="http://www.ncbi.nlm.nih.gov/pubmed/35225816" } @Article{info:doi/10.2196/31644, author="Gasteiger, Norina and van der Veer, N. Sabine and Wilson, Paul and Dowding, Dawn", title="How, for Whom, and in Which Contexts or Conditions Augmented and Virtual Reality Training Works in Upskilling Health Care Workers: Realist Synthesis", journal="JMIR Serious Games", year="2022", month="Feb", day="14", volume="10", number="1", pages="e31644", keywords="realist synthesis", keywords="realist review", keywords="review", keywords="virtual reality", keywords="augmented reality", keywords="simulation", keywords="training", keywords="health", keywords="health personnel", keywords="education", keywords="mobile phone", abstract="Background: Using traditional simulators (eg, cadavers, animals, or actors) to upskill health workers is becoming less common because of ethical issues, commitment to patient safety, and cost and resource restrictions. Virtual reality (VR) and augmented reality (AR) may help to overcome these barriers. However, their effectiveness is often contested and poorly understood and warrants further investigation. Objective: The aim of this review is to develop, test, and refine an evidence-informed program theory on how, for whom, and to what extent training using AR or VR works for upskilling health care workers and to understand what facilitates or constrains their implementation and maintenance. Methods: We conducted a realist synthesis using the following 3-step process: theory elicitation, theory testing, and theory refinement. We first searched 7 databases and 11 practitioner journals for literature on AR or VR used to train health care staff. In total, 80 papers were identified, and information regarding context-mechanism-outcome (CMO) was extracted. We conducted a narrative synthesis to form an initial program theory comprising of CMO configurations. To refine and test this theory, we identified empirical studies through a second search of the same databases used in the first search. We used the Mixed Methods Appraisal Tool to assess the quality of the studies and to determine our confidence in each CMO configuration. Results: Of the 41 CMO configurations identified, we had moderate to high confidence in 9 (22\%) based on 46 empirical studies reporting on VR, AR, or mixed simulation training programs. These stated that realistic (high-fidelity) simulations trigger perceptions of realism, easier visualization of patient anatomy, and an interactive experience, which result in increased learner satisfaction and more effective learning. Immersive VR or AR engages learners in deep immersion and improves learning and skill performance. When transferable skills and knowledge are taught using VR or AR, skills are enhanced and practiced in a safe environment, leading to knowledge and skill transfer to clinical practice. Finally, for novices, VR or AR enables repeated practice, resulting in technical proficiency, skill acquisition, and improved performance. The most common barriers to implementation were up-front costs, negative attitudes and experiences (ie, cybersickness), developmental and logistical considerations, and the complexity of creating a curriculum. 
Facilitating factors included decreasing costs through commercialization, increasing the cost-effectiveness of training, a cultural shift toward acceptance, access to training, and leadership and collaboration. Conclusions: Technical and nontechnical skills training programs using AR or VR for health care staff may trigger perceptions of realism and deep immersion and enable easier visualization, interactivity, enhanced skills, and repeated practice in a safe environment. This may improve skills and increase learning, knowledge, and learner satisfaction. The future testing of these mechanisms using hypothesis-driven approaches is required. Research is also required to explore implementation considerations. ", doi="10.2196/31644", url="https://games.jmir.org/2022/1/e31644", url="http://www.ncbi.nlm.nih.gov/pubmed/35156931" } @Article{info:doi/10.2196/30985, author="Vinolo Gil, Jesus Maria and Gonzalez-Medina, Gloria and Lucena-Anton, David and Perez-Cabezas, Veronica and Ruiz-Molinero, Carmen Mar{\'i}a Del and Mart{\'i}n-Valero, Roc{\'i}o", title="Augmented Reality in Physical Therapy: Systematic Review and Meta-analysis", journal="JMIR Serious Games", year="2021", month="Dec", day="15", volume="9", number="4", pages="e30985", keywords="augmented reality", keywords="physical therapy", keywords="rehabilitation", keywords="functionality", abstract="Background: Augmented reality (AR) is a rapidly expanding technology; it comprises the generation of new images from digital information in the real physical environment of a person, which simulates an environment where the artificial and real are mixed. The use of AR in physiotherapy has shown benefits in certain areas of patient health. However, these benefits have not been studied as a whole. Objective: This study aims to ascertain the current scientific evidence on AR therapy as a complement to physiotherapy and to determine the areas in which it has been used the most and which variables and methods have been most effective. Methods: A systematic review registered in PROSPERO (International Prospective Register of Systematic Reviews) was conducted following PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) recommendations. The search was conducted from July to August 2021 in the PubMed, PEDro, Web of Science, Scopus, and Cochrane Library scientific databases using the keywords augmented reality, physiotherapy, physical therapy, exercise therapy, rehabilitation, physical medicine, fitness, and occupational therapy. The methodological quality was evaluated using the PEDro scale and the Scottish Intercollegiate Guidelines Network scale to determine the degree of recommendation. The Cochrane Collaboration tool was used to evaluate the risk of bias. Results: In total, 11 articles were included in the systematic review. Of the 11 articles, 4 (36\%) contributed information to the meta-analysis. Overall, 64\% (7/11) obtained a good level of evidence, and most had a B degree of recommendation of evidence. A total of 308 participants were analyzed. Favorable results were found for the Berg Balance Scale (standardized mean change 0.473, 95\% CI --0.0877 to 1.0338; z=1.65; P=.10) and the Timed Up and Go test (standardized mean change --1.211, 95\% CI --3.2005 to 0.7768; z=--1.194; P=.23). 
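Several reviews in this collection, including the Vinolo Gil et al meta-analysis above, report pooled standardized mean changes with a 95% CI, z statistic, and P value. The following minimal sketch assumes hypothetical per-study SMDs and variances rather than the authors' data, and shows generic inverse-variance (fixed-effect) pooling; a random-effects model would be preferred when heterogeneity is high.

```python
# Illustrative sketch (hypothetical data, not any cited meta-analysis):
# inverse-variance pooling of standardized mean differences with a 95% CI,
# z statistic, and two-sided P value.
import numpy as np
from scipy import stats

def pool_smd(smds, variances):
    """Fixed-effect pooled SMD, 95% CI, z, and P from per-study estimates."""
    y = np.asarray(smds, dtype=float)
    w = 1.0 / np.asarray(variances, dtype=float)
    pooled = np.sum(w * y) / np.sum(w)
    se = np.sqrt(1.0 / np.sum(w))
    ci = (pooled - 1.96 * se, pooled + 1.96 * se)
    z = pooled / se
    p = 2 * (1 - stats.norm.cdf(abs(z)))
    return pooled, ci, z, p

pooled, ci, z, p = pool_smd([0.6, 0.4, 0.7], [0.09, 0.12, 0.15])
print(f"SMD = {pooled:.3f}, 95% CI {ci[0]:.3f} to {ci[1]:.3f}, z = {z:.2f}, P = {p:.3f}")
```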
Conclusions: AR, in combination with conventional therapy, has been used for the treatment of balance and fall prevention in geriatrics, lower and upper limb functionality in stroke, pain in phantom pain syndrome, and turning in place in patients with Parkinson disease with freezing of gait. AR is effective for the improvement of balance; however, given the small size of the samples and the high heterogeneity of the studies, the results were not conclusive. Future studies using larger sample sizes and with greater homogeneity in terms of the devices used and the frequency and intensity of the interventions are needed. Trial Registration: PROSPERO International Prospective Register of Systematic Reviews CRD42020180766; https://www.crd.york.ac.uk/prospero/display\_record.php?RecordID=180766 ", doi="10.2196/30985", url="https://games.jmir.org/2021/4/e30985", url="http://www.ncbi.nlm.nih.gov/pubmed/34914611" } @Article{info:doi/10.2196/30184, author="Li, Chong and Song, Xinyu and Chen, Shugeng and Wang, Chuankai and He, Jieying and Zhang, Yongli and Xu, Shuo and Yan, Zhijie and Jia, Jie and Shull, Peter", title="Long-term Effectiveness and Adoption of a Cellphone Augmented Reality System on Patients with Stroke: Randomized Controlled Trial", journal="JMIR Serious Games", year="2021", month="Nov", day="23", volume="9", number="4", pages="e30184", keywords="stroke", keywords="augmented reality", keywords="serious game", keywords="upper limb motor function", keywords="cognitive function", keywords="home-based rehabilitation", abstract="Background: A serious game--based cellphone augmented reality system (CARS) was developed for rehabilitation of stroke survivors, which is portable, convenient, and suitable for self-training. Objective: This study aims to examine the effectiveness of CARS in improving upper limb motor function and cognitive function of stroke survivors via conducting a long-term randomized controlled trial and analyze the patient's acceptance of the proposed system. Methods: A double-blind randomized controlled trial was performed with 30 poststroke, subacute phase patients. All patients in both the experimental group (n=15) and the control group (n=15) performed a 1-hour session of therapy each day, 5 days per week for 2 weeks. Patients in the experimental group received 30 minutes of rehabilitation training with CARS and 30 minutes of conventional occupational therapy (OT) each session, while patients in the control group received conventional OT for the full 1 hour each session. The Fugl-Meyer Assessment of Upper Extremity (FMA-UE) subscale, Action Research Arm Test (ARAT), manual muscle test and Brunnstrom stage were used to assess motor function; the Mini-Mental State Examination, Add VS Sub, and Stroop Game were used to assess cognitive function; and the Barthel index was used to assess activities of daily living before and after the 2-week treatment period. In addition, the User Satisfaction Evaluation Questionnaire was used to reflect the patients' adoption of the system in the experimental group after the final intervention. Results: All the assessment scores of the experimental group and control group were significantly improved after the intervention. The experimental group's FMA-UE and ARAT scores increased by 11.47 and 5.86, respectively, and both increases were significantly greater than those in the control group. 
Similarly, the scores of the Add VS Sub and Stroop Game in the experimental group increased by 7.53 and 6.83, respectively, after the intervention, which also represented a higher increase than that in the control group. The evaluation of the adoption of this system had 3 sub-dimensions. In terms of accessibility, the patients reported a mean score of 4.27 (SD 0.704) for the enjoyment of their experience with the system, a mean 4.33 (SD 0.816) for success in using the system, and a mean 4.67 (SD 0.617) for the ability to control the system. In terms of comfort, the patients reported a mean 4.40 (SD 0.737) for the clarity of information provided by the system and a mean 4.40 (SD 0.632) for comfort. In terms of acceptability, the patients reported a mean 4.27 (SD 0.884) for usefulness in their rehabilitation and a mean 4.67 (SD 0.617) in agreeing that CARS is a suitable tool for home-based rehabilitation. Conclusions: The rehabilitation based on combined CARS and conventional OT was more effective in improving both upper limb motor function and cognitive function than was conventional OT. Due to the low cost and ease of use, CARS is also potentially suitable for home-based rehabilitation. Trial Registration: Chinese Clinical Trial Registry ChiCTR1800017568; https://tinyurl.com/xbkkyfyz ", doi="10.2196/30184", url="https://games.jmir.org/2021/4/e30184", url="http://www.ncbi.nlm.nih.gov/pubmed/34817390" } @Article{info:doi/10.2196/29862, author="Chiang, Chih-Huei and Huang, Chiu-Mieh and Sheu, Jiunn-Jye and Liao, Jung-Yu and Hsu, Hsiao-Pei and Wang, Shih-Wen and Guo, Jong-Long", title="Examining the Effectiveness of 3D Virtual Reality Training on Problem-solving, Self-efficacy, and Teamwork Among Inexperienced Volunteers Helping With Drug Use Prevention: Randomized Controlled Trial", journal="J Med Internet Res", year="2021", month="Nov", day="2", volume="23", number="11", pages="e29862", keywords="3D virtual reality", keywords="volunteers", keywords="problem-solving", keywords="self-efficacy", keywords="teamwork", abstract="Background: Illegal drug usage among adolescents is a critical health problem. The Taiwanese government provides an accompanying volunteer program to prevent students who experiment with drugs from reusing them. An appropriate training program can improve volunteers' abilities to assist students using drugs. Problem-solving, self-efficacy, and teamwork are critical abilities for inexperienced volunteers who help with drug use prevention. By interacting with the animation or 3D virtual reality (VR) in the virtual scene, learners can immerse themselves in the virtual environment to learn, and 3D VR can increase learning opportunities and reduce the cost of human and material resources. Objective: The aim of this study was to examine the effectiveness of spherical video-based virtual reality (SVVR) training in improving problem-solving, self-efficacy, and teamwork among volunteers who helped prevent adolescents from using illegal drugs. Methods: This study used a randomized controlled design with a total of 68 participants in the experimental (n=35) and control (n=33) groups. The participants in the experimental group received the SVVR training program and their counterparts in the control group did not receive any training. Results: Generalized estimating equation analyses indicated that the experimental group showed significant posttraining improvements in problem-solving and self-efficacy but not teamwork when compared with the control group. 
Conclusions: The results of this study revealed that SVVR could improve participants' problem-solving skills and self-efficacy for assisting students in not using illegal drugs. However, future studies are suggested to develop effective SVVR to assist inexperienced volunteers in enhancing their teamwork abilities. We believed that introducing the training program to more sites can enhance volunteer training so that volunteers can have a better companionship effect when helping students quit drugs. Trial Registration: ClinicalTrials.gov NCT05072431; https://clinicaltrials.gov/ct2/show/NCT05072431 ", doi="10.2196/29862", url="https://www.jmir.org/2021/11/e29862", url="http://www.ncbi.nlm.nih.gov/pubmed/34726606" } @Article{info:doi/10.2196/17472, author="Follmann, Andreas and Ruhl, Alexander and G{\"o}sch, Michael and Felzen, Marc and Rossaint, Rolf and Czaplik, Michael", title="Augmented Reality for Guideline Presentation in Medicine: Randomized Crossover Simulation Trial for Technically Assisted Decision-making", journal="JMIR Mhealth Uhealth", year="2021", month="Oct", day="18", volume="9", number="10", pages="e17472", keywords="augmented reality", keywords="smart glasses", keywords="wearables", keywords="guideline presentation", keywords="decision support", keywords="triage", abstract="Background: Guidelines provide instructions for diagnostics and therapy in modern medicine. Various mobile devices are used to represent the potential complex decision trees. An example of time-critical decisions is triage in case of a mass casualty incident. Objective: In this randomized controlled crossover study, the potential of augmented reality for guideline presentation was evaluated and compared with the guideline presentation provided in a tablet PC as a conventional device. Methods: A specific Android app was designed for use with smart glasses and a tablet PC for the presentation of a triage algorithm as an example for a complex guideline. Forty volunteers simulated a triage based on 30 fictional patient descriptions, each with technical support from smart glasses and a tablet PC in a crossover trial design. The time to come to a decision and the accuracy were recorded and compared between both devices. Results: A total of 2400 assessments were performed by the 40 volunteers. A significantly faster time to triage was achieved in total with the tablet PC (median 12.8 seconds, IQR 9.4-17.7; 95\% CI 14.1-14.9) compared to that to triage with smart glasses (median 17.5 seconds, IQR 13.2-22.8, 95\% CI 18.4-19.2; P=.001). Considering the difference in the triage time between both devices, the additional time needed with the smart glasses could be reduced significantly in the course of assessments (21.5 seconds, IQR 16.5-27.3, 95\% CI 21.6-23.2) in the first run, 17.4 seconds (IQR 13-22.4, 95\% CI 17.6-18.9) in the second run, and 14.9 seconds (IQR 11.7-18.6, 95\% CI 15.2-16.3) in the third run (P=.001). With regard to the accuracy of the guideline decisions, there was no significant difference between both the devices. Conclusions: The presentation of a guideline on a tablet PC as well as through augmented reality achieved good results. The implementation with smart glasses took more time owing to their more complex operating concept but could be accelerated in the course of the study after adaptation. Especially in a non--time-critical working area where hands-free interfaces are useful, a guideline presentation with augmented reality can be of great use during clinical management. 
", doi="10.2196/17472", url="https://mhealth.jmir.org/2021/10/e17472", url="http://www.ncbi.nlm.nih.gov/pubmed/34661548" } @Article{info:doi/10.2196/27036, author="Zhang, Bo and Robb, Nigel", title="Immersion Experiences in a Tablet-Based Markerless Augmented Reality Working Memory Game: Randomized Controlled Trial and User Experience Study", journal="JMIR Serious Games", year="2021", month="Oct", day="12", volume="9", number="4", pages="e27036", keywords="augmented reality", keywords="markerless augmented reality", keywords="immersion experience", keywords="cognitive training games", keywords="working memory", keywords="markerless augmented reality n-back game", abstract="Background: In recent years, augmented reality (AR), especially markerless augmented reality (MAR), has been used more prevalently to create training games in an attempt to improve humans' cognitive functions. This has been driven by studies claiming that MAR provides users with more immersive experiences that are situated in the real world. Currently, no studies have scientifically investigated the immersion experience of users in a MAR cognitive training game. Moreover, there is an observed lack of instruments on measuring immersion in MAR cognitive training games. Objective: This study, using two existing immersion questionnaires, investigates students' immersion experiences in a novel MAR n-back game. Methods: The n-back task is a continuous performance task that taps working memory (WM) capacity. We compared two versions of n-back training. One was presented in a traditional 2D format, while the second version used MAR. There were 2 experiments conducted in this study that coordinated with 2 types of immersion questionnaires: the modified Immersive Experiences Questionnaire (IEQ) and the Augmented Reality Immersion (ARI) questionnaire. Two groups of students from two universities in China joined the study, with 60 participants for the first experiment (a randomized controlled experiment) and 51 participants for the second. Results: Both groups of students experienced immersion in the MAR n-back game. However, the MAR n-back training group did not experience stronger immersion than the traditional (2D) n-back control group in the first experiment. The results of the second experiment showed that males felt deeply involved with the AR environment, which resulted in obtaining higher levels of immersion than females in the MAR n-back game. Conclusions: Both groups of students experienced immersion in the MAR n-back game. Moreover, both the modified IEQ and ARI have the potential to be used as instruments to measure immersion in MAR game settings. Trial Registration: UMIN Clinical Trials Registry UMIN000045314; https://upload.umin.ac.jp/cgi-open-bin/ctr\_e/ctr\_view.cgi?recptno=R000051725 ", doi="10.2196/27036", url="https://games.jmir.org/2021/4/e27036", url="http://www.ncbi.nlm.nih.gov/pubmed/34636738" } @Article{info:doi/10.2196/29899, author="Toto, L. Regina and Vorel, S. Ethan and Tay, E. Khoon-Yen and Good, L. Grace and Berdinka, M. Jesse and Peled, Adam and Leary, Marion and Chang, P. Todd and Weiss, K. Anna and Balamuth, B. 
Frances", title="Augmented Reality in Pediatric Septic Shock Simulation: Randomized Controlled Feasibility Trial", journal="JMIR Med Educ", year="2021", month="Oct", day="6", volume="7", number="4", pages="e29899", keywords="augmented reality", keywords="simulation", keywords="septic shock", keywords="children", keywords="pediatrics", keywords="simulation-based education", keywords="application", keywords="fluid administration", abstract="Background: Septic shock is a low-frequency but high-stakes condition in children requiring prompt resuscitation, which makes it an important target for simulation-based education. Objective: In this study, we aimed to design and implement an augmented reality app (PediSepsisAR) for septic shock simulation, test the feasibility of measuring the timing and volume of fluid administration during septic shock simulation with and without PediSepsisAR, and describe PediSepsisAR as an educational tool. We hypothesized that we could feasibly measure our desired data during the simulation in 90\% of the participants in each group. With regard to using PediSepsisAR as an educational tool, we hypothesized that the PediSepsisAR group would report that it enhanced their awareness of simulated patient blood flow and would more rapidly verbalize recognition of abnormal patient status and desired management steps. Methods: We performed a randomized controlled feasibility trial with a convenience sample of pediatric care providers at a large tertiary care pediatric center. Participants completed a prestudy questionnaire and were randomized to either the PediSepsisAR or control (traditional simulation) arms. We measured the participants' time to administer 20, 40, and 60 cc/kg of intravenous fluids during a septic shock simulation using each modality. In addition, facilitators timed how long participants took to verbalize they had recognized tachycardia, hypotension, or septic shock and desired to initiate the sepsis pathway and administer antibiotics. Participants in the PediSepsisAR arm completed a poststudy questionnaire. We analyzed data using descriptive statistics and a Wilcoxon rank-sum test to compare the median time with event variables between groups. Results: We enrolled 50 participants (n=25 in each arm). The timing and volume of fluid administration were captured in all the participants in each group. There was no statistically significant difference regarding time to administration of intravenous fluids between the two groups. Similarly, there was no statistically significant difference between the groups regarding time to verbalized recognition of patient status or desired management steps. Most participants in the PediSepsisAR group reported that PediSepsisAR enhanced their awareness of the patient's perfusion. Conclusions: We developed an augmented reality app for use in pediatric septic shock simulations and demonstrated the feasibility of measuring the volume and timing of fluid administration during simulation using this modality. In addition, our findings suggest that PediSepsisAR may enhance participants' awareness of abnormal perfusion. ", doi="10.2196/29899", url="https://mededu.jmir.org/2021/4/e29899", url="http://www.ncbi.nlm.nih.gov/pubmed/34612836" } @Article{info:doi/10.2196/28767, author="Rigamonti, Lia and Secchi, Matteo and Lawrence, B. 
Jimmy and Labianca, Luca and Wolfarth, Bernd and Peters, Harm and Bonaventura, Klaus and Back, Alexander David", title="An Augmented Reality Device for Remote Supervision of Ultrasound Examinations in International Exercise Science Projects: Usability Study", journal="J Med Internet Res", year="2021", month="Oct", day="5", volume="23", number="10", pages="e28767", keywords="augmented reality", keywords="ultrasound", keywords="social media", keywords="remote", keywords="exercise science", abstract="Background: Support for long-distance research and clinical collaborations is in high demand and has increased owing to COVID-19--related restrictions on travel and social contact. New digital approaches are required for remote scientific exchange. Objective: This study aims to analyze the options of using an augmented reality device for remote supervision of exercise science examinations. Methods: A mobile ultrasound examination of the diameter and intima-media thickness of the femoral and carotid arteries was remotely supervised using a head-mounted augmented reality device. All participants were provided with a link to a YouTube video of the technique in advance. In part 1, 8 international experts from the fields of engineering and sports science were remotely connected to the study setting. Internet connection speed was noted, and a structured interview was conducted. In part 2, 2 remote supervisors evaluated 8 physicians performing an examination on a healthy human subject. The results were recorded, and an evaluation was conducted using a 25-item questionnaire. Results: In part 1, the remote experts were connected over a mean distance of 1587 km to the examination site. Overall transmission quality was good (mean upload speed: 28.7 Mbps, mean download speed: 97.3 Mbps, mean ping: 21.6 milliseconds). In the interview, participants indicated that the main potential benefits would be to the fields of education, movement analysis, and supervision. Challenges regarding internet connection stability and previous training with the devices used were reported. In part 2, physicians' examinations showed good interrater correlation (interclass correlation coefficient: 0.84). Participants valued the experienced setting as highly positive. Conclusions: The study showed the good feasibility of the chosen design and a highly positive attitude of all participants toward this digital approach. Head-mounted augmented reality devices are generally recommended for collaborative research projects with physical examination--based research questions. ", doi="10.2196/28767", url="https://www.jmir.org/2021/10/e28767", url="http://www.ncbi.nlm.nih.gov/pubmed/34609312" } @Article{info:doi/10.2196/28345, author="Schmucker, Michael and Haag, Martin", title="Automated Size Recognition in Pediatric Emergencies Using Machine Learning and Augmented Reality: Within-Group Comparative Study", journal="JMIR Form Res", year="2021", month="Sep", day="20", volume="5", number="9", pages="e28345", keywords="resuscitation", keywords="emergency medicine", keywords="mobile applications", keywords="mobile phone", keywords="user-computer interface", keywords="augmented reality", keywords="machine learning", abstract="Background: Pediatric emergencies involving children are rare events, and the experience of emergency physicians and the results of such emergencies are accordingly poor. Anatomical peculiarities and individual adjustments make treatment during pediatric emergency susceptible to error. 
Critical mistakes especially occur in the calculation of weight-based drug doses. Accordingly, the need for a ubiquitous assistance service that can, for example, automate dose calculation is high. However, few approaches exist due to the complexity of the problem. Objective: Technically, an assistance service is possible, among other approaches, with an app that uses a depth camera that is integrated in smartphones or head-mounted displays to provide a 3D understanding of the environment. The goal of this study was to automate this technology as much as possible to develop and statistically evaluate an assistance service that does not have significantly worse measurement performance than an emergency ruler (the state of the art). Methods: An assistance service was developed that uses machine learning to recognize patients and then automatically determines their size. Based on the size, the weight is automatically derived, and the dosages are calculated and presented to the physician. To evaluate the app, a small within-group design study was conducted with 17 children, who were each measured with the app installed on a smartphone with a built-in depth camera and a state-of-the-art emergency ruler. Results: According to the statistical results (one-sample t test; P=.42; $\alpha$=.05), there is no significant difference between the measurement performance of the app and an emergency ruler under the test conditions (indoor, daylight). The newly developed measurement method is thus not technically inferior to the established one in terms of accuracy. Conclusions: An assistance service with an integrated augmented reality emergency ruler is technically possible, although some groundwork is still needed. The results of this study clear the way for further research, for example, usability testing. ", doi="10.2196/28345", url="https://formative.jmir.org/2021/9/e28345", url="http://www.ncbi.nlm.nih.gov/pubmed/34542416" } @Article{info:doi/10.2196/26520, author="Ong, Triton and Wilczewski, Hattie and Paige, R. Samantha and Soni, Hiral and Welch, M. Brandon and Bunnell, E. Brian", title="Extended Reality for Enhanced Telehealth During and Beyond COVID-19: Viewpoint", journal="JMIR Serious Games", year="2021", month="Jul", day="26", volume="9", number="3", pages="e26520", keywords="extended reality", keywords="virtual reality", keywords="augmented reality", keywords="mixed reality", keywords="telehealth", keywords="telemedicine", keywords="COVID-19", keywords="telepresence", doi="10.2196/26520", url="https://games.jmir.org/2021/3/e26520", url="http://www.ncbi.nlm.nih.gov/pubmed/34227992" } @Article{info:doi/10.2196/29080, author="Barteit, Sandra and Lanfermann, Lucia and B{\"a}rnighausen, Till and Neuhann, Florian and Beiersmann, Claudia", title="Augmented, Mixed, and Virtual Reality-Based Head-Mounted Devices for Medical Education: Systematic Review", journal="JMIR Serious Games", year="2021", month="Jul", day="8", volume="9", number="3", pages="e29080", keywords="virtual reality", keywords="augmented reality", keywords="global health", keywords="income-limited countries", keywords="medical education", abstract="Background: Augmented reality (AR), mixed reality (MR), and virtual reality (VR), realized as head-mounted devices (HMDs), may open up new ways of teaching medical content for low-resource settings. 
The advantages are that HMDs enable repeated practice without adverse effects on the patient in various medical disciplines; may introduce new ways to learn complex medical content; and may alleviate financial, ethical, and supervisory constraints on the use of traditional medical learning materials, like cadavers and other skills lab equipment. Objective: We examine the effectiveness of AR, MR, and VR HMDs for medical education, whereby we aim to incorporate a global health perspective comprising low- and middle-income countries (LMICs). Methods: We conducted a systematic review according to PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analysis) and Cochrane guidelines. Seven medical databases (PubMed, Cochrane Library, Web of Science, Science Direct, PsycINFO, Education Resources Information Centre, and Google Scholar) were searched for peer-reviewed publications from January 1, 2014, to May 31, 2019. An extensive search was carried out to examine relevant literature guided by three concepts of extended reality (XR), which comprises the concepts of AR, MR, and VR, and the concepts of medicine and education. It included health professionals who took part in an HMD intervention that was compared to another teaching or learning method and evaluated with regard to its effectiveness. Quality and risk of bias were assessed with the Medical Education Research Study Quality Instrument, the Newcastle-Ottawa Scale-Education, and A Cochrane Risk of Bias Assessment Tool for Non-Randomized Studies of Interventions. We extracted relevant data and aggregated the data according to the main outcomes of this review (knowledge, skills, and XR HMD). Results: A total of 27 studies comprising 956 study participants were included. The participants included all types of health care professionals, especially medical students (n=573, 59.9\%) and residents (n=289, 30.2\%). AR and VR implemented with HMDs were most often used for training in the fields of surgery (n=13, 48\%) and anatomy (n=4, 15\%). A range of study designs were used, and quantitative methods were clearly dominant (n=21, 78\%). Training with AR- and VR-based HMDs was perceived as salient, motivating, and engaging. In the majority of studies (n=17, 63\%), HMD-based interventions were found to be effective. A small number of included studies (n=4, 15\%) indicated that HMDs were effective for certain aspects of medical skills and knowledge learning and training, while other studies suggested that HMDs were only viable as an additional teaching tool (n=4, 15\%). Only 2 (7\%) studies found no effectiveness in the use of HMDs. Conclusions: The majority of included studies suggested that XR-based HMDs have beneficial effects for medical education, whereby only a minority of studies were from LMICs. Nevertheless, as most studies showed at least noninferior results when compared to conventional teaching and training, the results of this review suggest applicability and potential effectiveness in LMICs. Overall, users demonstrated greater enthusiasm and enjoyment in learning with XR-based HMDs. It has to be noted that many HMD-based interventions were small-scale and conducted as short-term pilots. To generate relevant evidence in the future, it is key to rigorously evaluate XR-based HMDs with AR and VR implementations, particularly in LMICs, to better understand the strengths and shortcomings of HMDs for medical education. 
", doi="10.2196/29080", url="https://games.jmir.org/2021/3/e29080", url="http://www.ncbi.nlm.nih.gov/pubmed/34255668" } @Article{info:doi/10.2196/27972, author="Nishchyk, Anna and Chen, Weiqin and Pripp, Hugo Are and Bergland, Astrid", title="The Effect of Mixed Reality Technologies for Falls Prevention Among Older Adults: Systematic Review and Meta-analysis", journal="JMIR Aging", year="2021", month="Jun", day="30", volume="4", number="2", pages="e27972", keywords="falls", keywords="fall prevention", keywords="mixed reality", keywords="augmented reality", keywords="virtual reality", keywords="physical exercise", abstract="Background: Falling is one of the most common and serious age-related issues, and falls can significantly impair the quality of life of older adults. Approximately one-third of people over 65 experience a fall annually. Previous research has shown that physical exercise could help reduce falls among older adults and improve their health. However, older adults often find it challenging to follow and adhere to physical exercise programs. Interventions using mixed reality (MR) technology could help address these issues. MR combines artificial augmented computer-generated elements with the real world. It has frequently been used for training and rehabilitation purposes. Objective: The aim of this systematic literature review and meta-analysis was to investigate the use of the full spectrum of MR technologies for fall prevention intervention and summarize evidence of the effectiveness of this approach. Methods: In our qualitative synthesis, we analyzed a number of features of the selected studies, including aim, type of exercise, technology used for intervention, study sample size, participant demographics and history of falls, study design, involvement of health professionals or caregivers, duration and frequency of the intervention, study outcome measures, and results of the study. To systematically assess the results of the selected studies and identify the common effect of MR interventions, a meta-analysis was performed. Results: Seven databases were searched, and the initial search yielded 5838 results. With the considered inclusion and exclusion criteria, 21 studies were included in the qualitative synthesis and 12 were included in meta-analysis. The majority of studies demonstrated a positive effect of an MR intervention on fall risk factors among older participants. The meta-analysis demonstrated a statistically significant difference in Berg Balance Scale score between the intervention and control groups (ES: 0.564; 95\% CI 0.246-0.882; P<.001) with heterogeneity statistics of I2=54.9\% and Q=17.74 (P=.02), and a statistical difference in Timed Up and Go test scores between the intervention and control groups (ES: 0.318; 95\% CI 0.025-0.662; P<.001) with heterogeneity statistics of I2=77.6\% and Q=44.63 (P<.001). The corresponding funnel plot and the Egger test for small-study effects (P=.76 and P=.11 for Berg Balance Scale and Timed Up and Go, respectively) indicate that a minor publication bias in the studies might be present in the Berg Balance Scale results. Conclusions: The literature review and meta-analysis demonstrate that the use of MR interventions can have a positive effect on physical functions in the elderly. MR has the potential to help older users perform physical exercises that could improve their health conditions. 
However, more research on the effect of MR fall prevention interventions should be conducted with special focus given to MR usability issues. ", doi="10.2196/27972", url="https://aging.jmir.org/2021/2/e27972", url="http://www.ncbi.nlm.nih.gov/pubmed/34255643" } @Article{info:doi/10.2196/26963, author="Seals, Ayanna and Olaosebikan, Monsurat and Otiono, Jennifer and Shaer, Orit and Nov, Oded", title="Effects of Self-focused Augmented Reality on Health Perceptions During the COVID-19 Pandemic: A Web-Based Between-Subject Experiment", journal="J Med Internet Res", year="2021", month="Jun", day="29", volume="23", number="6", pages="e26963", keywords="COVID-19", keywords="health behavior", keywords="augmented reality", keywords="self-focused attention", keywords="vicarious reinforcement", keywords="human-computer interactions", keywords="hand hygiene", keywords="perception", abstract="Background: Self-focused augmented reality (AR) technologies are growing in popularity and present an opportunity to address health communication and behavior change challenges. Objective: We aimed to examine the impact of self-focused AR and vicarious reinforcement on psychological predictors of behavior change during the COVID-19 pandemic. In addition, our study included measures of fear and message minimization to assess potential adverse reactions to the design interventions. Methods: A between-subjects web-based experiment was conducted to compare the health perceptions of participants in self-focused AR and vicarious reinforcement design conditions to those in a control condition. Participants were randomly assigned to the control group or to an intervention condition (ie, self-focused AR, reinforcement, self-focus AR {\texttimes} reinforcement, and avatar). Results: A total of 335 participants were included in the analysis. We found that participants who experienced self-focused AR and vicarious reinforcement scored higher in perceived threat severity (P=.03) and susceptibility (P=.01) when compared to the control. A significant indirect effect of self-focused AR and vicarious reinforcement on intention was found with perceived threat severity as a mediator (b=.06, 95\% CI 0.02-0.12, SE .02). Self-focused AR and vicarious reinforcement did not result in higher levels of fear (P=.32) or message minimization (P=.42) when compared to the control. Conclusions: Augmenting one's reflection with vicarious reinforcement may be an effective strategy for health communication designers. While our study's results did not show adverse effects in regard to fear and message minimization, utilization of self-focused AR as a health communication strategy should be done with care due to the possible adverse effects of heightened levels of fear. ", doi="10.2196/26963", url="https://www.jmir.org/2021/6/e26963", url="http://www.ncbi.nlm.nih.gov/pubmed/33878017" } @Article{info:doi/10.2196/27165, author="Elrose, Francine and Hill, Andrew and Liu, David and Salisbury, Isaac and LeCong, Thien and Loeb, G. 
Robert and Sanderson, Penelope", title="The Use of Head-Worn Displays for Vital Sign Monitoring in Critical and Acute Care: Systematic Review", journal="JMIR Mhealth Uhealth", year="2021", month="May", day="11", volume="9", number="5", pages="e27165", keywords="wearable", keywords="wearable device", keywords="head-mounted display", keywords="head-worn display", keywords="clinical setting", keywords="medical setting", keywords="patient monitoring", keywords="healthcare", abstract="Background: Continuous monitoring of patient vital signs may improve patient outcomes. Head-worn displays (HWDs) can provide hands-free access to continuous vital sign information of patients in critical and acute care contexts and thus may reduce instances of unrecognized patient deterioration. Objective: The purpose of the study was to conduct a systematic review of the literature to evaluate clinical, surrogate, and process outcomes when clinicians use HWDs for continuous patient vital sign monitoring. Methods: The review was registered with PROSPERO (CRD42019119875) and followed the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-analyses) guidelines. A literature search was conducted for articles published between January 1995 and June 2020 using the following databases: PubMed, Embase, CINAHL, PsycINFO, and Web of Science. Overall, 2 reviewers independently screened titles and abstracts and then assessed the full text of the articles. Original research articles that evaluated the clinical, surrogate, or process outcomes of head-mounted displays for continuous vital sign monitoring in critical care or acute care contexts were included. Results: Of the 214 records obtained, 15 (7\%) articles met the predefined criteria and were included in this review. Of the 15 studies, 7 (47\%) took place in a clinical context, whereas the remainder took place in a simulation environment. In 100\% (7/7) of the studies that evaluated gaze behavior, changes were found in gaze direction with HWDs. Improvements in change detection were found in 67\% (2/3) of the studies evaluating the participants' ability to detect changes in vital signs. Of the 10 studies assessing the ease of use of the HWD, most participants in 7 (70\%) studies reported that the HWD was easy to use. In all 6 studies in which participants were asked if they would consider using the HWD in their practice, most participants responded positively, but they often suggested improvements to the HWD hardware or display design. Of the 7 studies conducted in clinical contexts, none reported any clinical outcomes. Conclusions: There is limited and sometimes conflicting evidence about the benefits of HWDs for certain surrogate and process outcomes, and evidence for clinical outcomes is lacking. Recommendations are to employ user-centered design when developing HWDs, perform longitudinal studies, and seek clinical outcomes. Trial Registration: PROSPERO International Prospective Register of Systematic Reviews CRD42019119875; https://www.crd.york.ac.uk/prospero/display\_record.php?RecordID=119875 ", doi="10.2196/27165", url="https://mhealth.jmir.org/2021/5/e27165", url="http://www.ncbi.nlm.nih.gov/pubmed/33973863" } @Article{info:doi/10.2196/25916, author="Logan, E. Deirdre and Simons, E. Laura and Caruso, J. Thomas and Gold, I. Jeffrey and Greenleaf, Walter and Griffin, Anya and King, D. Christopher and Menendez, Maria and Olbrecht, A. Vanessa and Rodriguez, Samuel and Silvia, Megan and Stinson, N. Jennifer and Wang, Ellen and Williams, E.
Sara and Wilson, Luke", title="Leveraging Virtual Reality and Augmented Reality to Combat Chronic Pain in Youth: Position Paper From the Interdisciplinary Network on Virtual and Augmented Technologies for Pain Management", journal="J Med Internet Res", year="2021", month="Apr", day="26", volume="23", number="4", pages="e25916", keywords="virtual reality", keywords="pediatric", keywords="chronic pain", abstract="Background: Virtual reality (VR) and augmented reality (AR) interventions are emerging as promising tools in the treatment of pediatric chronic pain conditions. However, in this young field, there is little consensus to guide the process of engaging in the development and evaluation of targeted VR-based interventions. Objective: The INOVATE-Pain (Interdisciplinary Network on Virtual and Augmented Technologies for Pain management) consortium aims to advance the field of VR for pediatric chronic pain rehabilitation by providing guidance for best practices in the design, evaluation, and dissemination of VR-based interventions targeting this population. Methods: An interdisciplinary meeting of 16 academics, clinicians, industry partners, and philanthropy partners was held in January 2020. Results: Reviewing the state of the field, the consortium identified important directions for research-driven innovation in VR and AR clinical care, highlighted key opportunities and challenges facing the field, and established a consensus on best methodological practices to adopt in future efforts to advance the research and practice of VR and AR in pediatric pain. The consortium also identified important next steps to undertake to continue to advance the work in this promising new area of digital health pain interventions. Conclusions: To realize the promise of this realm of innovation, key ingredients for success include productive partnerships among industry, academic, and clinical stakeholders; a uniform set of outcome domains and measures for standardized evaluation; and widespread access to the latest opportunities, tools, and resources. The INOVATE-Pain collaborative hopes to promote the creation, rigorous yet efficient evaluation, and dissemination of innovative VR-based interventions to reduce pain and improve quality of life for children. ", doi="10.2196/25916", url="https://www.jmir.org/2021/4/e25916", url="http://www.ncbi.nlm.nih.gov/pubmed/33667177" } @Article{info:doi/10.2196/24313, author="Kim, Kyung Sun and Lee, Youngho and Yoon, Hyoseok and Choi, Jongmyung", title="Adaptation of Extended Reality Smart Glasses for Core Nursing Skill Training Among Undergraduate Nursing Students: Usability and Feasibility Study", journal="J Med Internet Res", year="2021", month="Mar", day="2", volume="23", number="3", pages="e24313", keywords="nursing education", keywords="skill training", keywords="self-practice", keywords="smart glass", keywords="usability", keywords="feasibility", abstract="Background: Skill training in nursing education has been highly dependent on self-training because of Korea's high student-faculty ratio. Students tend to have a passive attitude in self-practice, and it is hard to expect effective learning outcomes with traditional checklist-dependent self-practice. Smart glasses have a high potential to assist nursing students with timely information, and a hands-free device does not interrupt performance. Objective: This study aimed to develop a smart glass--based nursing skill training program and evaluate its usability and feasibility for the implementation of self-practice. 
Methods: We conducted a usability and feasibility study with 30 undergraduate nursing students during a 2-hour open lab for self-practice of core nursing skills, wearing smart glasses for visualized guidance. The usability test was conducted using a 16-item self-reporting questionnaire and 7 open-ended questions. Learning satisfaction was assessed using a 7-item questionnaire. The number of practice sessions was recorded, and perceived competency in core nursing skills was measured before and after the intervention. At the final evaluation, performance accuracy and time consumed for completion were recorded. Results: Smart glass--assisted self-practice of nursing skills was perceived as helpful, convenient, and interesting. Participants reported improved recollection of sequences of skills, and perceived competency was significantly improved. Several issues were raised by participants regarding smart glasses, including small screen size, touch sensors, fogged lenses with masks, heaviness, and heat after a period of time. Conclusions: Smart glasses have the potential to assist self-practice, providing timely information at students' own paces. Having both hands free from holding a device, participants reported the convenience of learning as they could practice and view the information simultaneously. Further revision correcting reported issues would improve the applicability of smart glasses in other areas of nursing education. ", doi="10.2196/24313", url="https://www.jmir.org/2021/3/e24313", url="http://www.ncbi.nlm.nih.gov/pubmed/33650975" } @Article{info:doi/10.2196/21643, author="Vinci, Christine and Brandon, O. Karen and Kleinjan, Marloes and Hernandez, M. Laura and Sawyer, E. Leslie and Haneke, Jody and Sutton, K. Steven and Brandon, H. Thomas", title="Augmented Reality for Smoking Cessation: Development and Usability Study", journal="JMIR Mhealth Uhealth", year="2020", month="Dec", day="31", volume="8", number="12", pages="e21643", keywords="augmented reality", keywords="smoking cessation", keywords="cue exposure therapy", keywords="cue reactivity", keywords="behavior change", keywords="smoking", keywords="smartphone app", keywords="mobile phone", abstract="Background: The recent widespread availability of augmented reality via smartphone offers an opportunity to translate cue exposure therapy for smoking cessation from the laboratory to the real world. Despite significant reductions in the smoking rates in the last decade, approximately 13.7\% of the adults in the United States continue to smoke. Smoking-related cue exposure has demonstrated promise as an adjuvant therapy in the laboratory, but practical limitations have prevented its success in the real world. Augmented reality technology presents an innovative approach to overcome these limitations. Objective: The aim of this study was to develop a smartphone app that presents smoking-related augmented reality images for cue exposure. Smokers provided feedback on the images and reported on the perceived urge to smoke, qualities of reality/coexistence, and general feedback about quality and functioning. The feedback was used to refine the augmented reality images within the app. Methods: In collaboration with an augmented reality design company, we developed 6 smoking-related images (cigarette, lighter, ashtray, lit cigarette in ashtray, etc) and 6 neutral images similar in size or complexity for comparison (pen, eraser, notebook, soda bottle with droplets, etc). 
Ten smokers completed a survey of demographic characteristics, smoking history and behavior, dependence on nicotine, motivation to quit smoking, and familiarity with augmented reality technology. Then, participants viewed each augmented reality image and provided ratings on 10-point Likert scales for urge to smoke and reality/coexistence of the image into the scene. Participants were also queried with open-ended questions regarding the features of the images. Results: Of the 10 participants, 5 (50\%) had experienced augmented reality prior to the laboratory visit, but only 4 of those 5 participants used augmented reality at least weekly. Although the sample was small (N=10), smokers reported significantly higher urge to smoke after viewing the smoking-related augmented reality images (median 4.58, SD 3.49) versus the neutral images (median 1.42, SD 3.01) (Z=--2.14, P=.03; d=0.70). The average reality and coexistence ratings of the images did not differ between smoking-related and neutral images (all P>.29). Augmented reality images were found on average to be realistic (mean [SD] score 6.49 [3.11]) and have good environmental coexistence (mean [SD] score 6.93 [3.04]) and user coexistence (mean [SD] score 6.38 [3.27]) on the 10-point scale. Participant interviews revealed some areas of excellence (eg, details of the lit cigarette) and areas for improvement (eg, stability of images, lighting). Conclusions: All images were generally perceived as being realistic and well-integrated into the environment. However, the smoking augmented reality images produced higher urge to smoke than the neutral augmented reality images. In total, our findings support the potential utility of augmented reality for cue exposure therapy. Future directions and next steps are discussed. ", doi="10.2196/21643", url="http://mhealth.jmir.org/2020/12/e21643/", url="http://www.ncbi.nlm.nih.gov/pubmed/33382377" } @Article{info:doi/10.2196/25117, author="Ellis, A. Louise and Lee, D. Matthew and Ijaz, Kiran and Smith, James and Braithwaite, Jeffrey and Yin, Kathleen", title="COVID-19 as `Game Changer' for the Physical Activity and Mental Well-Being of Augmented Reality Game Players During the Pandemic: Mixed Methods Survey Study", journal="J Med Internet Res", year="2020", month="Dec", day="22", volume="22", number="12", pages="e25117", keywords="COVID-19", keywords="Pok{\'e}mon GO", keywords="Harry Potter: Wizards Unite", keywords="augmented reality games", keywords="physical activity", keywords="mental health", keywords="well-being", abstract="Background: Location-based augmented reality (AR) games, such as Pok{\'e}mon GO and Harry Potter: Wizards Unite, have been shown to have a beneficial impact on the physical activity, social connectedness, and mental health of their players. In March 2020, global social distancing measures related to the COVID-19 pandemic prompted the AR games developer Niantic Inc to implement several changes to ensure continued player engagement with Pok{\'e}mon GO and Harry Potter: Wizards Unite. We sought to examine how the physical and mental well-being of players of these games were affected during the unprecedented COVID-19 restriction period as well as how their video game engagement was affected. 
Objective: The aims of this study were to examine the impact of COVID-19--related social restrictions on the physical and mental well-being of AR game players; to examine the impact of COVID-19--related social restrictions on the use of video games and motivations for their use; and to explore the potential role of AR games (and video games in general) in supporting well-being during COVID-19--related social restrictions. Methods: A mixed methods web-based self-reported survey was conducted in May 2020, during which COVID-19--related social restrictions were enforced in many countries. Participants were recruited on the web via four subreddits dedicated to Pok{\'e}mon GO or Harry Potter: Wizards Unite. Data collected included quantitative data on demographics, time spent playing video games, physical activity, and mental health; qualitative data included motivations to play and the impact of video games on mental health during COVID-19 lockdown. Results: We report results for 2004 participants (1153/1960 male, 58.8\%, average age 30.5 years). Self-reported physical activity during COVID-19--related social restrictions significantly decreased from 7.50 hours per week on average (SD 11.12) to 6.50 hours (SD 7.81) (P<.001). More than half of the participants reported poor mental health (925/1766, 52.4\%; raw World Health Organization--5 Well-Being Index score <13). Female gender, younger age, and reduced exercise were significant predictors of poor mental health. Participants reported a significant increase in video game play time from 16.38 hours per week on average (SD 19.12) to 20.82 hours (SD 17.49) (P<.001). Approximately three quarters of the participants (n=1102/1427, 77.2\%) reported that playing video games had been beneficial to their mental health. The changes made to Pok{\'e}mon GO and Harry Potter: Wizards Unite were very well received by players, and the players continued to use these games while exercising and to maintain social connection. In addition to seeking an escape during the pandemic and as a form of entertainment, participants reported that they used video games for emotional coping and to lower stress, relax, and alleviate mental health conditions. Conclusions: AR games have the potential to promote physical and mental health during the COVID-19 pandemic. Used by populations under isolation and distress, these games can improve physical and mental health by providing virtual socialization, sustained exercise, temporal routine, and mental structure. Further research is needed to explore the potential of AR games as digital behavioral interventions to maintain human well-being in the wider population. ", doi="10.2196/25117", url="http://www.jmir.org/2020/12/e25117/", url="http://www.ncbi.nlm.nih.gov/pubmed/33284781" } @Article{info:doi/10.2196/22007, author="Chen, Yen-Fu and Janicki, Sylvia", title="A Cognitive-Based Board Game With Augmented Reality for Older Adults: Development and Usability Study", journal="JMIR Serious Games", year="2020", month="Dec", day="14", volume="8", number="4", pages="e22007", keywords="cognitive-based", keywords="augmented reality", keywords="board game", keywords="older adults", keywords="cognitive health", keywords="serious game", abstract="Background: Older adults in Taiwan are advised to adopt regular physical and social activities for the maintenance of their cognitive and physical health. Games offer a means of engaging older individuals in these activities. 
For this study, a collaborative cognitive-based board game, Nostalgic Seekers, was designed and developed with augmented reality technology to support cognitive engagement in older adults. Objective: A user study of the board game was conducted to understand how the game facilitates communication, problem solving, and emotional response in older players and whether augmented reality is a suitable technology in game design for these players. Methods: A total of 23 participants aged 50 to 59 years were recruited to play and evaluate the game. In each session, participants' interactions were observed and recorded, then analyzed through Bales' interaction process analysis. Following each session, participants were interviewed to provide feedback on their experience. Results: The quantitative analysis results showed that the participants engaged in task-oriented communication more frequently than social-emotional communication during the game. In particular, there was a high number of answers relative to questions. The analysis also showed a significant positive correlation between task-oriented acts and the game score. Qualitative analysis indicated that participants found the experience of playing the game enjoyable, nostalgic objects triggered positive emotional responses, and augmented reality technology was widely accepted by participants and provided effective engagement in the game. Conclusions: Nostalgic Seekers provided cognitive exercise and social engagement to players and demonstrated the positive potential of integrating augmented reality technology into cognitive-based games for older adults. Future game designs could explore strategies for regular and continuous engagement. ", doi="10.2196/22007", url="http://games.jmir.org/2020/4/e22007/", url="http://www.ncbi.nlm.nih.gov/pubmed/33315015" } @Article{info:doi/10.2196/23963, author="Kotcherlakota, Suhasini and Pelish, Peggy and Hoffman, Katherine and Kupzyk, Kevin and Rejda, Patrick", title="Augmented Reality Technology as a Teaching Strategy for Learning Pediatric Asthma Management: Mixed Methods Study", journal="JMIR Nursing", year="2020", month="Dec", day="2", volume="3", number="1", pages="e23963", keywords="augmented reality", keywords="graduate nursing", keywords="pediatric asthma management", keywords="flipped learning", keywords="nursing", keywords="asthma", keywords="chronic disease", keywords="nurse practitioner", keywords="nursing students", keywords="pediatric asthma", abstract="Background: Asthma is a major chronic disease affecting 8.6\% of children in the United States. Objective: The purpose of this research was to assess the use of clinical simulation scenarios using augmented reality technology to evaluate learning outcomes for nurse practitioner students studying pediatric asthma management. Methods: A mixed-methods pilot study was conducted with 2 cohorts of graduate pediatric nurse practitioner students (N=21), with each cohort participating for 2 semesters. Results: Significant improvements in pediatric asthma test scores (P<.001) of student learning were found in both cohorts at posttest in both semesters. Student satisfaction with the augmented reality technology was found to be high. The focus group discussions revealed that the simulation was realistic and helpful for a flipped classroom approach. Conclusions: The study results suggest augmented reality simulation to be valuable in teaching pediatric asthma management content in graduate nursing education. 
", doi="10.2196/23963", url="https://nursing.jmir.org/2001/1/e23963/", url="http://www.ncbi.nlm.nih.gov/pubmed/34406970" } @Article{info:doi/10.2196/18367, author="Dallas-Orr, David and Penev, Yordan and Schultz, Robert and Courtier, Jesse", title="Comparing Computed Tomography--Derived Augmented Reality Holograms to a Standard Picture Archiving and Communication Systems Viewer for Presurgical Planning: Feasibility Study", journal="JMIR Perioper Med", year="2020", month="Sep", day="24", volume="3", number="2", pages="e18367", keywords="augmented reality", keywords="mixed reality", keywords="picture archiving and communication system", keywords="presurgical planning", keywords="new technology evaluation", keywords="medical imaging", keywords="surgery", abstract="Background: Picture archiving and communication systems (PACS) are ubiquitously used to store, share, and view radiological information for preoperative planning across surgical specialties. Although traditional PACS software has proven reliable in terms of display accuracy and ease of use, it remains limited by its inherent representation of medical imaging in 2 dimensions. Augmented reality (AR) systems present an exciting opportunity to complement traditional PACS capabilities. Objective: This study aims to evaluate the technical feasibility of using a novel AR platform, with holograms derived from computed tomography (CT) imaging, as a supplement to traditional PACS for presurgical planning in complex surgical procedures. Methods: Independent readers measured objects of predetermined, anthropomorphically correlated sizes using the circumference and angle tools of standard-of-care PACS software and a newly developed augmented reality presurgical planning system (ARPPS). Results: Measurements taken with the standard PACS and the ARPPS showed no statistically significant differences. Bland-Altman analysis showed a mean difference of 0.08\% (95\% CI --4.20\% to 4.36\%) for measurements taken with PACS versus ARPPS' circumference tools and --1.84\% (95\% CI --6.17\% to 2.14\%) for measurements with the systems' angle tools. Lin's concordance correlation coefficients were 1.00 and 0.98 for the circumference and angle measurements, respectively, indicating almost perfect strength of agreement between ARPPS and PACS. Intraclass correlation showed no statistically significant difference between the readers for either measurement tool on each system. Conclusions: ARPPS can be an effective, accurate, and precise means of 3D visualization and measurement of CT-derived holograms in the presurgical care timeline. ", doi="10.2196/18367", url="http://periop.jmir.org/2020/2/e18367/", url="http://www.ncbi.nlm.nih.gov/pubmed/33393933" } @Article{info:doi/10.2196/17822, author="LaPiana, Nina and Duong, Alvin and Lee, Alex and Alschitz, Leon and Silva, L. Rafael M. and Early, Jody and Bunnell, Aaron and Mourad, Pierre", title="Acceptability of a Mobile Phone--Based Augmented Reality Game for Rehabilitation of Patients With Upper Limb Deficits from Stroke: Case Study", journal="JMIR Rehabil Assist Technol", year="2020", month="Sep", day="2", volume="7", number="2", pages="e17822", keywords="augmented reality", keywords="stroke", keywords="upper limb rehabilitation", keywords="gamification", keywords="motor rehabilitation", keywords="motivation", keywords="participation", abstract="Background: Upper limb functional deficits are common after stroke and result from motor weakness, ataxia, spasticity, spatial neglect, and poor stamina. 
Past studies employing a range of commercial gaming systems to deliver rehabilitation to stroke patients have shown short-term efficacy but have not yet demonstrated whether those games are acceptable, that is, motivational, comfortable, and engaging, all of which are necessary for adoption and use by patients. Objective: The goal of the study was to assess the acceptability of a smartphone-based augmented reality game as a means of delivering stroke rehabilitation for patients with upper limb motor function loss. Methods: Patients aged 50 to 70 years, all of whom experienced motor deficits after acute ischemic stroke, participated in 3 optional therapy sessions using augmented reality therapeutic gaming over the course of 1 week, targeting deficits in upper extremity strength and range of motion. After completion of the game, we administered a 16-item questionnaire to the patients to assess the game's acceptability; 8 questions were answered by rating on a scale from 1 (very negative experience) to 5 (very positive experience); 8 questions were qualitative. Results: Patients (n=5) completed a total of 23 out of 45 scheduled augmented reality game sessions, with patient fatigue as the primary reason for uncompleted sessions. Each patient consented to 9 potential game sessions and completed a mean of 4.6 (SE 1.3) games. Of the 5 patients, 4 (80\%) completed the questionnaire at the end of their final gaming session. Of note, patients were motivated to continue to the end of a given gaming session (mean 4.25, 95\% CI 3.31-5.19), to try other game-based therapies (mean 3.75, 95\% CI 2.81-4.69), to do another session (mean 3.50, 95\% CI 2.93-4.07), and to perform other daily rehabilitation exercises (mean 3.25, 95\% CI 2.76-3.74). In addition, participants gave mean scores of 4.00 (95\% CI 2.87-5.13) for overall experience; 4.25 (95\% CI 3.31-5.19) for comfort; 3.25 (95\% CI 2.31-4.19) for finding the study fun, enjoyable, and engaging; and 3.50 (95\% CI 2.52-4.48) for believing the technology could help them reach their rehabilitation goals. For each of the 4 patients, their reported scores were statistically significantly higher than those generated by a random sampling of values (patient 1: P=.04; patient 2: P=.04; patient 4: P=.004; patient 5: P=.04). Conclusions: Based on the questionnaire scores, the patients with upper limb motor deficits following stroke who participated in our case study found our augmented reality game motivating, comfortable, engaging, and tolerable. Improvements to augmented reality technology motivated by this case study may one day allow patients to use this therapy independently in their own homes. We therefore anticipate that smartphone-based augmented reality gaming systems may eventually provide useful postdischarge self-treatment as a supplement to professional therapy for patients with upper limb deficiencies from stroke.
", doi="10.2196/17822", url="http://rehab.jmir.org/2020/2/e17822/", url="http://www.ncbi.nlm.nih.gov/pubmed/32876580" } @Article{info:doi/10.2196/21486, author="Martin, Guy and Koizia, Louis and Kooner, Angad and Cafferkey, John and Ross, Clare and Purkayastha, Sanjay and Sivananthan, Arun and Tanna, Anisha and Pratt, Philip and Kinross, James and ", title="Use of the HoloLens2 Mixed Reality Headset for Protecting Health Care Workers During the COVID-19 Pandemic: Prospective, Observational Evaluation", journal="J Med Internet Res", year="2020", month="Aug", day="14", volume="22", number="8", pages="e21486", keywords="COVID-19", keywords="mixed reality", keywords="telemedicine", keywords="protection", keywords="acceptability", keywords="feasibility", keywords="impact", keywords="headset", keywords="virtual reality", keywords="augmented reality", keywords="pilot", abstract="Background: The coronavirus disease (COVID-19) pandemic has led to rapid acceleration in the deployment of new digital technologies to improve both accessibility to and quality of care, and to protect staff. Mixed-reality (MR) technology is the latest iteration of telemedicine innovation; it is a logical next step in the move toward the provision of digitally supported clinical care and medical education. This technology has the potential to revolutionize care both during and after the COVID-19 pandemic. Objective: This pilot project sought to deploy the HoloLens2 MR device to support the delivery of remote care in COVID-19 hospital environments. Methods: A prospective, observational, nested cohort evaluation of the HoloLens2 was undertaken across three distinct clinical clusters in a teaching hospital in the United Kingdom. Data pertaining to staff exposure to high-risk COVID-19 environments and personal protective equipment (PPE) use by clinical staff (N=28) were collected, and assessments of acceptability and feasibility were conducted. Results: The deployment of the HoloLens2 led to a 51.5\% reduction in time exposed to harm for staff looking after COVID-19 patients (3.32 vs 1.63 hours/day/staff member; P=.002), and an 83.1\% reduction in the amount of PPE used (178 vs 30 items/round/day; P=.02). This represents 222.98 hours of reduced staff exposure to COVID-19, and 3100 fewer PPE items used each week across the three clusters evaluated. The majority of staff using the device agreed it was easy to set up and comfortable to wear, improved the quality of care and decision making, and led to better teamwork and communication. In total, 89.3\% (25/28) of users felt that their clinical team was safer when using the HoloLens2. Conclusions: New technologies have a role in minimizing exposure to nosocomial infection, optimizing the use of PPE, and enhancing aspects of care. Deploying such technologies at pace requires context-specific information security, infection control, user experience, and workflow integration to be addressed at the outset and led by clinical end-users. The deployment of new telemedicine technology must be supported with objective evidence for its safety and effectiveness to ensure maximum impact. 
", doi="10.2196/21486", url="http://www.jmir.org/2020/8/e21486/", url="http://www.ncbi.nlm.nih.gov/pubmed/32730222" } @Article{info:doi/10.2196/18637, author="Muangpoon, Theerapat and Haghighi Osgouei, Reza and Escobar-Castillejos, David and Kontovounisios, Christos and Bello, Fernando", title="Augmented Reality System for Digital Rectal Examination Training and Assessment: System Validation", journal="J Med Internet Res", year="2020", month="Aug", day="13", volume="22", number="8", pages="e18637", keywords="Augmented Reality", keywords="Digital Rectal Examination (DRE)", keywords="Magnetic Tracker", keywords="Pressure Sensor", keywords="Medical Education", keywords="Usability", abstract="Background: Digital rectal examination is a difficult examination to learn and teach because of limited opportunities for practice; however, the main challenge is that students and tutors cannot see the finger when it is palpating the anal canal and prostate gland inside the patients. Objective: This paper presents an augmented reality system to be used with benchtop models commonly available in medical schools with the aim of addressing the problem of lack of visualization. The system enables visualization of the examining finger, as well as of the internal organs when performing digital rectal examinations. Magnetic tracking sensors are used to track the movement of the finger, and a pressure sensor is used to monitor the applied pressure. By overlaying a virtual finger on the real finger and a virtual model on the benchtop model, students can see through the examination and finger maneuvers. Methods: The system was implemented in the Unity game engine (Unity Technologies) and uses a first-generation HoloLens (Microsoft Inc) as an augmented reality device. To evaluate the system, 19 participants (9 clinicians who routinely performed digital rectal examinations and 10 medical students) were asked to use the system and answer 12 questions regarding the usefulness of the system. Results: The system showed the movement of an examining finger in real time with a frame rate of 60 fps on the HoloLens and accurately aligned the virtual and real models with a mean error of 3.9 mm. Users found the movement of the finger was realistic (mean 3.9, SD 1.2); moreover, they found the visualization of the finger and internal organs were useful for teaching, learning, and assessment of digital rectal examinations (finger: mean 4.1, SD 1.1; organs: mean 4.6, SD 0.8), mainly targeting a novice group. Conclusions: The proposed augmented reality system was designed to improve teaching and learning of digital rectal examination skills by providing visualization of the finger and internal organs. The initial user study proved its applicability and usefulness. ", doi="10.2196/18637", url="https://www.jmir.org/2020/8/e18637", url="http://www.ncbi.nlm.nih.gov/pubmed/32788146" } @Article{info:doi/10.2196/17147, author="Hilt, D. Alexander and Mamaqi Kapllani, Kevin and Hierck, P. Beerend and Kemp, C. Anne and Albayrak, Armagan and Melles, Marijke and Schalij, J. Martin and Scherptong, C. 
Roderick W.", title="Perspectives of Patients and Professionals on Information and Education After Myocardial Infarction With Insight for Mixed Reality Implementation: Cross-Sectional Interview Study", journal="JMIR Hum Factors", year="2020", month="Jun", day="23", volume="7", number="2", pages="e17147", keywords="human factors", keywords="myocardial infarction", keywords="mixed reality", keywords="patient education", keywords="patient experience", keywords="PROM", abstract="Background: Patient education is crucial in the secondary prevention of cardiovascular disease. Novel technologies such as augmented reality or mixed reality expand the possibilities for providing visual support in this process. Mixed reality creates interactive digital three-dimensional (3D) projections overlaying virtual objects on the real-world environment. While augmented reality only overlays objects, mixed reality not just overlays but anchors virtual objects to the real world. However, research on this technology in the patient domain is scarce. Objective: The aim of this study was to understand how patients perceive information provided after myocardial infarction and examine if mixed reality can be supportive in this process. Methods: In total, 12 patients that experienced myocardial infarction and 6 health care professionals were enrolled in the study. Clinical, demographic, and qualitative data were obtained through semistructured interviews, with a main focus on patient experiences within the hospital and the knowledge they gained about their disease. These data were then used to map a susceptible timeframe to identify how mixed reality can contribute to patient information and education. Results: Knowledge transfer after myocardial infarction was perceived by patients as too extensive, not personal, and inconsistent. Notably, knowledge on anatomy and medication was minimal and was not recognized as crucial by patients, whereas professionals stated the opposite. Patient journey analysis indicated the following four critical phases of knowledge transfer: at hospital discharge, at the first outpatient visit, during rehabilitation, and during all follow-up outpatient visits. Important patient goals were understanding the event in relation to daily life and its implications on resuming daily life. During follow-up, understanding physical limitations and coping with the condition and medication side effects in daily life emerged as the most important patient goals. The professionals' goals were to improve recovery, enhance medication adherence, and offer coping support. Conclusions: There is a remarkable difference between patients' and professionals' goals regarding information and education after myocardial infarction. Mixed reality may be a practical tool to unite perspectives of patients and professionals on the disease in a more even manner, and thus optimize knowledge transfer after myocardial infarction. Improving medication knowledge seems to be a feasible target for mixed reality. However, further research is needed to create durable methods for education on medication through mixed reality interventions. 
", doi="10.2196/17147", url="http://humanfactors.jmir.org/2020/2/e17147/", url="http://www.ncbi.nlm.nih.gov/pubmed/32573464" } @Article{info:doi/10.2196/17804, author="Held, Oskar Jeremia Philipp and Yu, Kevin and Pyles, Connor and Veerbeek, Marieke Janne and Bork, Felix and Heining, Sandro-Michael and Navab, Nassir and Luft, R{\"u}diger Andreas", title="Augmented Reality--Based Rehabilitation of Gait Impairments: Case Report", journal="JMIR Mhealth Uhealth", year="2020", month="May", day="26", volume="8", number="5", pages="e17804", keywords="HoloLens 2", keywords="gait", keywords="rehabilitation", keywords="stroke", keywords="augmented reality", keywords="sensors", abstract="Background: Gait and balance impairments are common in neurological diseases, including stroke, and negatively affect patients' quality of life. Improving balance and gait are among the main goals of rehabilitation. Rehabilitation is mainly performed in clinics, which lack context specificity; therefore, training in the patient's home environment is preferable. In the last decade, developed rehabilitation technologies such as virtual reality and augmented reality (AR) have enabled gait and balance training outside clinics. Here, we propose a new method for gait rehabilitation in persons who have had a stroke in which mobile AR technology and a sensor-based motion capture system are combined to provide fine-grained feedback on gait performance in real time. Objective: The aims of this study were (1) to investigate manipulation of the gait pattern of persons who have had a stroke based on virtual augmentation during overground walking compared to walking without AR performance feedback and (2) to investigate the usability of the AR system. Methods: We developed the ARISE (Augmented Reality for gait Impairments after StrokE) system, in which we combined a development version of HoloLens 2 smart glasses (Microsoft Corporation) with a sensor-based motion capture system. One patient with chronic minor gait impairment poststroke completed clinical gait assessments and an AR parkour course with patient-centered performance gait feedback. The movement kinematics during gait as well as the usability and safety of the system were evaluated. Results: The patient changed his gait pattern during AR parkour compared to the pattern observed during the clinical gait assessments. He recognized the virtual objects and ranked the usability of the ARISE system as excellent. In addition, the patient stated that the system would complement his standard gait therapy. Except for the symptom of exhilaration, no adverse events occurred. Conclusions: This project provided the first evidence of gait adaptation during overground walking based on real-time feedback through visual and auditory augmentation. The system has potential to provide gait and balance rehabilitation outside the clinic. This initial investigation of AR rehabilitation may aid the development and investigation of new gait and balance therapies. 
", doi="10.2196/17804", url="http://mhealth.jmir.org/2020/5/e17804/", url="http://www.ncbi.nlm.nih.gov/pubmed/32452815" } @Article{info:doi/10.2196/14910, author="Ingrassia, Luigi Pier and Mormando, Giulia and Giudici, Eleonora and Strada, Francesco and Carfagna, Fabio and Lamberti, Fabrizio and Bottino, Andrea", title="Augmented Reality Learning Environment for Basic Life Support and Defibrillation Training: Usability Study", journal="J Med Internet Res", year="2020", month="May", day="12", volume="22", number="5", pages="e14910", keywords="augmented reality", keywords="cardiopulmonary resuscitation", keywords="automated external defibrillators", abstract="Background: Basic life support (BLS) is crucial in the emergency response system, as sudden cardiac arrest is still a major cause of death worldwide. Unfortunately, only a minority of victims receive cardiopulmonary resuscitation (CPR) from bystanders. In this context, training could be helpful to save more lives, and technology-enhanced BLS simulation is one possible solution. Objective: The aim of this study is to assess the feasibility and acceptability of our augmented reality (AR) prototype as a tool for BLS training. Methods: Holo-BLSD is an AR self-instruction training system, in which a standard CPR manikin is ``augmented'' with an interactive virtual environment that reproduces realistic scenarios. Learners can use natural gestures, body movements, and spoken commands to perform their tasks, with virtual 3D objects anchored to the manikin and the environment. During the experience, users were trained to use the device while being guided through an emergency simulation and, at the end, were asked to complete a survey to assess the feasibility and acceptability of the proposed tool (5-point Likert scale; 1=Strongly Disagree, 5=Strongly Agree). Results: The system was rated easy to use (mean 4.00, SD 0.94), and the trainees stated that most people would learn to use it very quickly (mean 4.00, SD 0.89). Voice (mean 4.48, SD 0.87), gaze (mean 4.12, SD 0.97), and gesture interaction (mean 3.84, SD 1.14) were judged positively, although some hand gesture recognition errors reduced the feeling of having the right level of control over the system (mean 3.40, SD 1.04). Conclusions: We found the Holo-BLSD system to be a feasible and acceptable tool for AR BLS training. ", doi="10.2196/14910", url="https://www.jmir.org/2020/5/e14910", url="http://www.ncbi.nlm.nih.gov/pubmed/32396128" } @Article{info:doi/10.2196/13810, author="Nag, Anish and Haber, Nick and Voss, Catalin and Tamura, Serena and Daniels, Jena and Ma, Jeffrey and Chiang, Bryan and Ramachandran, Shasta and Schwartz, Jessey and Winograd, Terry and Feinstein, Carl and Wall, P. Dennis", title="Toward Continuous Social Phenotyping: Analyzing Gaze Patterns in an Emotion Recognition Task for Children With Autism Through Wearable Smart Glasses", journal="J Med Internet Res", year="2020", month="Apr", day="22", volume="22", number="4", pages="e13810", keywords="autism spectrum disorder", keywords="translational medicine", keywords="eye tracking", keywords="wearable technologies", keywords="artificial intelligence", keywords="machine learning", keywords="precision health", keywords="digital therapy", abstract="Background: Several studies have shown that facial attention differs in children with autism. Measuring eye gaze and emotion recognition in children with autism is challenging, as standard clinical assessments must be delivered in clinical settings by a trained clinician. 
Wearable technologies may be able to bring eye gaze and emotion recognition into natural social interactions and settings. Objective: This study aimed to test: (1) the feasibility of tracking gaze using wearable smart glasses during a facial expression recognition task and (2) the ability of these gaze-tracking data, together with facial expression recognition responses, to distinguish children with autism from neurotypical controls (NCs). Methods: We compared the eye gaze and emotion recognition patterns of 16 children with autism spectrum disorder (ASD) and 17 children without ASD via wearable smart glasses fitted with a custom eye tracker. Children identified static facial expressions of images presented on a computer screen along with nonsocial distractors while wearing Google Glass and the eye tracker. Faces were presented in three trials, during one of which children received feedback in the form of the correct classification. We employed hybrid human-labeling and computer vision--enabled methods for pupil tracking and world--gaze translation calibration. We analyzed the impact of gaze and emotion recognition features in a prediction task aiming to distinguish children with ASD from NC participants. Results: Gaze and emotion recognition patterns enabled the training of a classifier that distinguished ASD and NC groups. However, it was unable to significantly outperform other classifiers that used only age and gender features, suggesting that further work is necessary to disentangle these effects. Conclusions: Although wearable smart glasses show promise in identifying subtle differences in gaze tracking and emotion recognition patterns in children with and without ASD, the present form factor and data do not allow for these differences to be reliably exploited by machine learning systems. Resolving these challenges will be an important step toward continuous tracking of the ASD phenotype. ", doi="10.2196/13810", url="http://www.jmir.org/2020/4/e13810/", url="http://www.ncbi.nlm.nih.gov/pubmed/32319961" } @Article{info:doi/10.2196/16055, author="Romare, Charlotte and Sk{\"a}r, Lisa", title="Smart Glasses for Caring Situations in Complex Care Environments: Scoping Review", journal="JMIR Mhealth Uhealth", year="2020", month="Apr", day="20", volume="8", number="4", pages="e16055", keywords="anesthesia department", keywords="critical care", keywords="intensive care units", keywords="scoping review", keywords="smart glasses", abstract="Background: Anesthesia departments and intensive care units represent two advanced, high-tech, and complex care environments. Health care in those environments involves different types of technology to provide safe, high-quality care. Smart glasses have previously been used in different health care settings and have been suggested to assist health care professionals in numerous areas. However, smart glasses in the complex contexts of anesthesia care and intensive care are new and innovative. An overview of existing research related to these contexts is needed before implementing smart glasses into complex care environments. Objective: The aim of this study was to highlight potential benefits and limitations with health care professionals' use of smart glasses in situations occurring in complex care environments. Methods: A scoping review with six steps was conducted to fulfill the objective. 
Database searches were conducted in PubMed and Scopus; original articles about health care professionals' use of smart glasses in complex care environments and/or situations occurring in those environments were included. The searches yielded a total of 20 articles that were included in the review. Results: Three categories were created during the qualitative content analysis: (1) smart glasses as a versatile tool that offers opportunities and challenges, (2) smart glasses entail positive and negative impacts on health care professionals, and (3) smart glasses' quality of use provides facilities and leaves room for improvement. Smart glasses were found to be both a helpful tool and a hindrance in caring situations that might occur in complex care environments. This review provides an increased understanding about different situations where smart glasses might be used by health care professionals in clinical practice in anesthesia care and intensive care; however, research about smart glasses in clinical complex care environments is limited. Conclusions: Thoughtful implementation and improved hardware are needed to meet health care professionals' needs. New technology brings challenges; more research is required to elucidate how smart glasses affect patient safety, health care professionals, and quality of care in complex care environments. ", doi="10.2196/16055", url="http://mhealth.jmir.org/2020/4/e16055/", url="http://www.ncbi.nlm.nih.gov/pubmed/32310144" } @Article{info:doi/10.2196/16852, author="Jiang, Taoran and Yu, Dewang and Wang, Yuqi and Zan, Tao and Wang, Shuyi and Li, Qingfeng", title="HoloLens-Based Vascular Localization System: Precision Evaluation Study With a Three-Dimensional Printed Model", journal="J Med Internet Res", year="2020", month="Apr", day="17", volume="22", number="4", pages="e16852", keywords="augmented reality", keywords="HoloLens", keywords="perforator flap", keywords="vascular localization", keywords="reconstructive surgery", keywords="3D Printing", abstract="Background: Vascular localization is crucial for perforator flap transfer. Augmented reality offers a novel method to seamlessly combine real information with virtual objects created by computed tomographic angiography to help the surgeon ``see through'' the skin and precisely localize the perforator. The head-mounted display augmented reality system HoloLens (Microsoft) could facilitate augmented reality--based perforator localization for a more convenient and safe procedure. Objective: The aim of this study was to evaluate the precision of the HoloLens-based vascular localization system, as the most important performance indicator of a new localization system. Methods: The precision of the HoloLens-based vascular localization system was tested in a simulated operating room under different conditions with a three-dimensional (3D) printed model. The coordinates of five pairs of points on the vascular map that could be easily identified on the 3D printed model and virtual model were detected by a probe, and the distance between the corresponding points was calculated as the navigation error. Results: The mean errors were determined under different conditions, with a minimum error of 1.35 mm (SD 0.43) and maximum error of 3.18 mm (SD 1.32), which were within the clinically acceptable range. There were no significant differences in the errors obtained under different visual angles, different light intensities, or different states (static or motion). 
However, the error was larger when tested with light compared with that tested without light. Conclusions: This precision evaluation demonstrated that the HoloLens system can precisely localize the perforator and potentially help the surgeon accomplish the operation. The authors recommend using HoloLens-based surgical navigation without light. ", doi="10.2196/16852", url="http://www.jmir.org/2020/4/e16852/", url="http://www.ncbi.nlm.nih.gov/pubmed/32301738" } @Article{info:doi/10.2196/10987, author="Amantini, Ribeiro Susy Nazar{\'e} Silva and Montilha, Pascotto Alexandre Alberto and Antonelli, Caseiro Bianca and Leite, Moura Kim Tanabe and Rios, Daniela and Cruvinel, Thiago and Louren{\c{c}}o Neto, Natalino and Oliveira, Marchini Thais and Machado, Moreira Maria Aparecida Andrade", title="Using Augmented Reality to Motivate Oral Hygiene Practice in Children: Protocol for the Development of a Serious Game", journal="JMIR Res Protoc", year="2020", month="Jan", day="17", volume="9", number="1", pages="e10987", keywords="video games", keywords="education, dental", keywords="user-computer interface", keywords="computer simulation", keywords="pediatric dentistry", abstract="Background: New technologies create possible new ways of action, interaction, and learning which is extremely relevant in the field of oral health education. There is a lack of protocol in using an immersive interactive ludic-educational interface to motivate oral hygiene practice in children by means of augmented reality. Objective: This study aims to present a protocol on the development of a serious game to motivate oral hygiene practice in children. Methods: A serious game will be designed by augmented reality techniques to improve toothbrushing effectiveness of children aged 6 to 10 years. The functional structure of this interface is activated by means of movements recognized by Kinect (Microsoft Corp). The toothbrushing technique will be available in the game, enabling the children to execute the movement in the virtual environment. By identifying errors, this game will be tailored to improve the oral health of children by correcting the technique and teaching the user the adequate toothbrushing method. A template analysis will be performed to identify barriers and facilitators in each scenario. Results: After the implementation of the virtual interactive and immersive panels, enrollment will begin and evaluations will be made by means of questionnaires distributed to participants who interact with the game. Thus, an analysis of the product efficacy will be conducted. The expected outcome will be to obtain a digital instrument to motivate oral hygiene practice and enhance health awareness in children. Conclusions: The serious game will support the prevention of oral diseases by sharing scientific research in the school environment and community. 
International Registered Report Identifier (IRRID): PRR1-10.2196/10987 ", doi="10.2196/10987", url="https://www.researchprotocols.org/2020/1/e10987", url="http://www.ncbi.nlm.nih.gov/pubmed/31951216" } @Article{info:doi/10.2196/13722, author="Buimer, Hendrik and Schellens, Renske and Kostelijk, Tjerk and Nemri, Abdellatif and Zhao, Yan and Van der Geest, Thea and Van Wezel, Richard", title="Opportunities and Pitfalls in Applying Emotion Recognition Software for Persons With a Visual Impairment: Simulated Real Life Conversations", journal="JMIR Mhealth Uhealth", year="2019", month="Nov", day="21", volume="7", number="11", pages="e13722", keywords="visual impairment", keywords="emotion recognition", keywords="tactile", keywords="social interaction", abstract="Background: A large part of the communication cues exchanged between persons is nonverbal. Persons with a visual impairment are often unable to perceive these cues, such as gestures or facial expression of emotions. In a previous study, we have determined that visually impaired persons can increase their ability to recognize facial expressions of emotions from validated pictures and videos by using an emotion recognition system that signals vibrotactile cues associated with one of the six basic emotions. Objective: The aim of this study was to determine whether the previously tested emotion recognition system worked equally well in realistic situations and under controlled laboratory conditions. Methods: The emotion recognition system consists of a camera mounted on spectacles, a tablet running facial emotion recognition software, and a waist belt with vibrotactile stimulators to provide haptic feedback representing Ekman's six universal emotions. A total of 8 visually impaired persons (4 females and 4 males; mean age 46.75 years, age range 28-66 years) participated in two training sessions followed by one experimental session. During the experiment, participants engaged in two 15 minute conversations, in one of which they wore the emotion recognition system. To conclude the study, exit interviews were conducted to assess the experiences of the participants. Due to technical issues with the registration of the emotion recognition software, only 6 participants were included in the video analysis. Results: We found that participants were quickly able to learn, distinguish, and remember vibrotactile signals associated with the six emotions. A total of 4 participants felt that they were able to use the vibrotactile signals in the conversation. Moreover, 5 out of the 6 participants had no difficulties in keeping the camera focused on the conversation partner. The emotion recognition was very accurate in detecting happiness but performed unsatisfactorily in recognizing the other five universal emotions. Conclusions: The system requires some essential improvements in performance and wearability before it is ready to support visually impaired persons in their daily life interactions. Nevertheless, the participants saw potential in the system as an assistive technology, assuming their user requirements can be met. ", doi="10.2196/13722", url="https://mhealth.jmir.org/2019/11/e13722", url="http://www.ncbi.nlm.nih.gov/pubmed/31750838" } @Article{info:doi/10.2196/13594, author="Hu, Xiao-Su and Nascimento, D. Thiago and Bender, C. Mary and Hall, Theodore and Petty, Sean and O'Malley, Stephanie and Ellwood, P. Roger and Kaciroti, Niko and Maslowski, Eric and DaSilva, F. 
Alexandre", title="Feasibility of a Real-Time Clinical Augmented Reality and Artificial Intelligence Framework for Pain Detection and Localization From the Brain", journal="J Med Internet Res", year="2019", month="Jun", day="28", volume="21", number="6", pages="e13594", keywords="pain", keywords="spectroscopy, near-infrared", keywords="virtual reality", keywords="artificial intelligence", abstract="Background: For many years, clinicians have been seeking for objective pain assessment solutions via neuroimaging techniques, focusing on the brain to detect human pain. Unfortunately, most of those techniques are not applicable in the clinical environment or lack accuracy. Objective: This study aimed to test the feasibility of a mobile neuroimaging-based clinical augmented reality (AR) and artificial intelligence (AI) framework, CLARAi, for objective pain detection and also localization direct from the patient's brain in real time. Methods: Clinical dental pain was triggered in 21 patients by hypersensitive tooth stimulation with 20 consecutive descending cold stimulations (32{\textdegree}C-0{\textdegree}C). We used a portable optical neuroimaging technology, functional near-infrared spectroscopy, to gauge their cortical activity during evoked acute clinical pain. The data were decoded using a neural network (NN)--based AI algorithm to classify hemodynamic response data into pain and no-pain brain states in real time. We tested the performance of several networks (NN with 7 layers, 6 layers, 5 layers, 3 layers, recurrent NN, and long short-term memory network) upon reorganized data features on pain diction and localization in a simulated real-time environment. In addition, we also tested the feasibility of transmitting the neuroimaging data to an AR device, HoloLens, in the same simulated environment, allowing visualization of the ongoing cortical activity on a 3-dimensional brain template virtually plotted on the patients' head during clinical consult. Results: The artificial neutral network (3-layer NN) achieved an optimal classification accuracy at 80.37\% (126,000/156,680) for pain and no pain discrimination, with positive likelihood ratio (PLR) at 2.35. We further explored a 3-class localization task of left/right side pain and no-pain states, and convolutional NN-6 (6-layer NN) achieved highest classification accuracy at 74.23\% (1040/1401) with PLR at 2.02. Conclusions: Additional studies are needed to optimize and validate our prototype CLARAi framework for other pains and neurologic disorders. However, we presented an innovative and feasible neuroimaging-based AR/AI concept that can potentially transform the human brain into an objective target to visualize and precisely measure and localize pain in real time where it is most needed: in the doctor's office. 
International Registered Report Identifier (IRRID): RR1-10.2196/13594 ", doi="10.2196/13594", url="https://www.jmir.org/2019/6/e13594/", url="http://www.ncbi.nlm.nih.gov/pubmed/31254336" } @Article{info:doi/10.2196/13447, author="Boillat, Thomas and Grantcharov, Peter and Rivas, Homero", title="Increasing Completion Rate and Benefits of Checklists: Prospective Evaluation of Surgical Safety Checklists With Smart Glasses", journal="JMIR Mhealth Uhealth", year="2019", month="Apr", day="29", volume="7", number="4", pages="e13447", keywords="smart glasses", keywords="surgical safety checklists", keywords="surgery", keywords="usability", keywords="time-out event", abstract="Background: Studies have demonstrated that surgical safety checklists (SSCs) can significantly reduce surgical complications and mortality rates. Such lists rely on traditional posters or paper, and their contents are generic regarding the type of surgery being performed. SSC completion rates and uniformity of content have been reported as modest and widely variable. Objective: This study aimed to investigate the feasibility and potential of using smart glasses in the operating room to increase the benefits of SSCs by improving usability through contextualized content and, ideally, resulting in improved completion rates. Methods: We prospectively evaluated and compared 80 preoperative time-out events with SSCs at a major academic medical center between June 2016 and February 2017. Participants were assigned to either a conventional checklist approach (poster, memory, or both) or a smart glasses app running on Google Glass. Results: Four different surgeons conducted 41 checklists using conventional methods (ie, memory or poster) and 39 using the smart glasses app. The average checklist completion rate using conventional methods was 76\%. Smart glasses allowed a completion rate of up to 100\% with a decrease in average checklist duration of 18\%. Conclusions: Compared with alternatives such as posters, paper, and memory, smart glasses checklists are easier to use and follow. The glasses allowed surgeons to use contextualized time-out checklists, which increased the completion rate to 100\% and reduced the checklist execution time and time required to prepare the equipment during surgical cases. ", doi="10.2196/13447", url="http://mhealth.jmir.org/2019/4/e13447/", url="http://www.ncbi.nlm.nih.gov/pubmed/31033451" } @Article{info:doi/10.2196/10967, author="Eckert, Martin and Volmerg, S. Julia and Friedrich, M. Christoph", title="Augmented Reality in Medicine: Systematic and Bibliographic Review", journal="JMIR Mhealth Uhealth", year="2019", month="Apr", day="26", volume="7", number="4", pages="e10967", keywords="mixed/augmented reality", keywords="medicine", keywords="mobile computing", keywords="systematic review", keywords="mobile phone", abstract="Background: Augmented reality (AR) is a technology that integrates digital information into the user's real-world environment. It offers a new approach for treatments and education in medicine. AR aids in surgery planning and patient treatment and helps explain complex medical situations to patients and their relatives. Objective: This systematic and bibliographic review offers an overview of the development of apps in AR with a medical use case from March 2012 to June 2017. This work can aid as a guide to the literature and categorizes the publications in the field of AR research. 
Methods: From March 2012 to June 2017, a total of 1309 publications from PubMed and Scopus databases were manually analyzed and categorized based on a predefined taxonomy. Of the total, 340 duplicates were removed and 631 publications were excluded due to incorrect classification or unavailable technical data. The remaining 338 publications were original research studies on AR. An assessment of the maturity of the projects was conducted on these publications by using the technology readiness level. To provide a comprehensive process of inclusion and exclusion, the authors adopted the Preferred Reporting Items for Systematic Reviews and Meta-Analyses statement. Results: The results showed an increasing trend in the number of publications on AR in medicine. There were no relevant clinical trials on the effect of AR in medicine. Domains that used display technologies seemed to be researched more than other medical fields. The technology readiness level showed that AR technology is following a rough bell curve from levels 4 to 7. Current AR technology is more often applied to treatment scenarios than training scenarios. Conclusions: This work discusses the applicability and future development of augmented- and mixed-reality technologies such as wearable computers and AR devices. It offers an overview of current technology and a base for researchers interested in developing AR apps in medicine. The field of AR is well researched, and there is a positive trend in its application, but its use is still in the early stages in the field of medicine and it is not widely adopted in clinical practice. Clinical studies proving the effectiveness of applied AR technologies are still lacking. ", doi="10.2196/10967", url="http://mhealth.jmir.org/2019/4/e10967/", url="http://www.ncbi.nlm.nih.gov/pubmed/31025950" } @Article{info:doi/10.2196/12368, author="Munzer, William Brendan and Khan, Mairaj Mohammad and Shipman, Barbara and Mahajan, Prashant", title="Augmented Reality in Emergency Medicine: A Scoping Review", journal="J Med Internet Res", year="2019", month="Apr", day="17", volume="21", number="4", pages="e12368", keywords="augmented reality", keywords="emergency medicine", keywords="education", keywords="telemedicine", abstract="Background: Augmented reality is increasingly being investigated for its applications to medical specialties as well as in medical training. Currently, there is little information about its applicability to training and care delivery in the context of emergency medicine. Objective: The objective of this article is to review current literature related to augmented reality applicable to emergency medicine and its training. Methods: Through a scoping review utilizing Scopus, MEDLINE, and Embase databases for article searches, we identified articles involving augmented reality that directly involved emergency medicine or was in an area of education or clinical care that could be potentially applied to emergency medicine. Results: A total of 24 articles were reviewed in detail and were categorized into three groups: user-environment interface, telemedicine and prehospital care, and education and training. Conclusions: Through analysis of the current literature across fields, we were able to demonstrate that augmented reality has utility and feasibility in clinical care delivery in patient care settings, in operating rooms and inpatient settings, and in education and training of emergency care providers. 
Additionally, we found that the use of augmented reality for care delivery over distances is feasible, suggesting a role in telehealth. Our results from the review of the literature in emergency medicine and other specialties reveal that further research into the uses of augmented reality will have a substantial role in changing how emergency medicine as a specialty will deliver care and provide education and training. ", doi="10.2196/12368", url="http://www.jmir.org/2019/4/e12368/", url="http://www.ncbi.nlm.nih.gov/pubmed/30994463" } @Article{info:doi/10.2196/12207, author="Weichelt, Bryan and Heimonen, Tomi and Pilz, Matthew and Yoder, Aaron and Bendixsen, Casper", title="An Argument Against Cross-Platform Development: Lessons From an Augmented Reality App Prototype for Rural Emergency Responders", journal="JMIR Mhealth Uhealth", year="2019", month="Mar", day="28", volume="7", number="3", pages="e12207", keywords="rural health", keywords="mhealth", keywords="telemedicine", keywords="emergency medical services", abstract="Background: Mobile augmented reality (MAR) apps offer potential support for emergency responders in rural areas. Objective: In this report, we described lessons learned from the development process of augmented reality (AR) Farm Mapping to Assist, Protect and Prepare Emergency Responders (MAPPER), a MAR app that provides emergency responders onsite information about the agricultural operation they enter. Methods: Cross-platform frameworks were used to create AR MAPPER to accommodate budget constraints and overcome issues with markerless MAR technologies. Although the single codebase and Web technologies streamlined development, cross-device hardware limitations impacted location accuracy, lengthened the development cycle, and required regular updates to third-party libraries. Results: A hybrid development approach of using Web-based technologies with native tie-ins for specialized components and enhanced performance cut time and costs. This also led to consistency across multiple platforms and ensured that there is only a single set of source files to modify for Android and iPhone operating systems. Meanwhile, active development was delayed by some major hurdles. Apple and Google both released new versions of their operating systems, and the Wikitude framework issued four major updates, each of which brought with it some important enhancements and also led to some new issues. Conclusions: Developers should consider single platform native development to benefit from platform-specific MAR implementations and to avoid development, testing, and maintenance costs associated with cross-platform implementation. Emergency response organizations may be more likely to utilize a single platform across the devices used by their command staff. This also reduces the benefits of cross-platform development. Furthermore, providing map-based, non-AR cross-platform apps for landowners, farmers, and ranchers would help improve and maintain data quality, which is crucial for the utility and user experience of MAR apps. 
", doi="10.2196/12207", url="http://mhealth.jmir.org/2019/3/e12207/", url="http://www.ncbi.nlm.nih.gov/pubmed/30920380" } @Article{info:doi/10.2196/11251, author="Kim, Cheol-Hwan and Ryu, Seon-Young and Yoon, Ji-Young and Lee, Hyoung-Kwon and Choi, Nak-Gu and Park, Il-Ho and Choi, Hae-Young", title="See-Through Type 3D Head-Mounted Display--Based Surgical Microscope System for Microsurgery: A Feasibility Study", journal="JMIR Mhealth Uhealth", year="2019", month="Mar", day="07", volume="7", number="3", pages="e11251", keywords="3D imaging", keywords="head-mounted display", keywords="microsurgery", keywords="surgical microscope", abstract="Background: The surgical microscope is used primarily for microsurgeries, which are more complicated than other surgical procedures and require delicate tasks for a long time. Therefore, during these surgical procedures, surgeons experience back and neck pain. To solve this problem, new technology, such as wearable displays, is required to help surgeons maintain comfortable postures and enjoy advanced functionality during microsurgery. Objective: The objective of this study was to develop a surgical microscope system that would work with wearable devices. It would include a head-mounted display (HMD) that can offer 3D surgical images and allow a flexible and comfortable posture instead of fixed eyepieces of surgical microscope and can also provide peripheral visual field with its optical see-through function. Methods: We designed and fabricated a surgical microscope system that incorporates a see-through type 3D HMD, and we developed an image processing software to provide better image quality. The usability of the proposed system was confirmed with preclinical examination. Seven ENT (ear, nose, and throat) surgical specialists and 8 residents performed a mock surgery---axillary lymph node dissection on a rat. They alternated between looking through the eyepieces of the surgical microscope and viewing a 3D HMD screen connected to the surgical microscope. We examined the success of the surgery and asked the specialists and residents to grade eye fatigue on a scale of 0 (none) to 6 (severe) and posture discomfort on a scale of 1 (none) to 5 (severe). Furthermore, a statistical comparison was performed using 2-tailed paired t test, and P=.00083 was considered significant. Results: Although 3D HMD case showed a slightly better result regarding visual discomfort (P=.097), the average eye fatigue was not significantly different between eyepiece and 3D HMD cases (P=.79). However, the average posture discomfort, especially in neck and shoulder, was lower with 3D HMD display use than with eyepiece use (P=.00083). Conclusions: We developed a see-through type 3D HMD--based surgical microscope system and showed through preclinical testing that the system could help reduce posture discomfort. The proposed system, with its advanced functions, could be a promising new technique for microsurgery. ", doi="10.2196/11251", url="https://mhealth.jmir.org/2019/3/e11251/", url="http://www.ncbi.nlm.nih.gov/pubmed/30843867" } @Article{info:doi/10.2196/11939, author="Follmann, Andreas and Ohligs, Marian and Hochhausen, Nadine and Beckers, K. 
Stefan and Rossaint, Rolf and Czaplik, Michael", title="Technical Support by Smart Glasses During a Mass Casualty Incident: A Randomized Controlled Simulation Trial on Technically Assisted Triage and Telemedical App Use in Disaster Medicine", journal="J Med Internet Res", year="2019", month="Jan", day="03", volume="21", number="1", pages="e11939", keywords="augmented reality", keywords="disaster medicine", keywords="emergency medical service physician", keywords="mass casualty incident", keywords="Smart Glasses", keywords="telemedicine", keywords="triage", abstract="Background: To treat many patients despite lacking personnel resources, triage is important in disaster medicine. Various triage algorithms help but often are used incorrectly or not at all. One potential problem-solving approach is to support triage with Smart Glasses. Objective: In this study, augmented reality was used to display a triage algorithm and telemedicine assistance was enabled to compare the duration and quality of triage with a conventional one. Methods: A specific Android app was designed for use with Smart Glasses, which added information in terms of augmented reality with two different methods---through the display of a triage algorithm in data glasses and a telemedical connection to a senior emergency physician realized by the integrated camera. A scenario was created (ie, randomized simulation study) in which 31 paramedics carried out a triage of 12 patients in 3 groups as follows: without technical support (control group), with a triage algorithm display, and with telemedical contact. Results: A total of 362 assessments were performed. The accuracy in the control group was only 58\%, but the assessments were quicker (on average 16.6 seconds). In contrast, an accuracy of 92\% (P=.04) was achieved when using technical support by displaying the triage algorithm. This triaging took an average of 37.0 seconds. The triage group wearing data glasses and being telemedically connected achieved 90\% accuracy (P=.01) in 35.0 seconds. Conclusions: Triage with data glasses required markedly more time. While only a tally was recorded in the control group, Smart Glasses led to digital capture of the triage results, which have many tactical advantages. We expect a high potential in the application of Smart Glasses in disaster scenarios when using telemedicine and augmented reality features to improve the quality of triage. ", doi="10.2196/11939", url="https://www.jmir.org/2019/1/e11939/", url="http://www.ncbi.nlm.nih.gov/pubmed/30609988" } @Article{info:doi/10.2196/pediatrics.9576, author="Bakker, Aafke and Janssen, Lindy and Noordam, Cees", title="Home to Hospital Live Streaming With Virtual Reality Goggles: A Qualitative Study Exploring the Experiences of Hospitalized Children", journal="JMIR Pediatr Parent", year="2018", month="Dec", day="13", volume="1", number="2", pages="e10", keywords="experiences", keywords="hospitalization", keywords="mobile phone", keywords="livestream", keywords="pediatrics", keywords="qualitative analysis", keywords="videoconferencing", keywords="virtual reality", abstract="Background: Being separated from home and relatives is a major stressor for children and adolescents when hospitalized. Children long for a manner to be distracted, pleasured, and socially connected during hospitalization. Different technological devices have been applied in health care to answer those needs. Both virtual reality (VR) and videoconferencing have proven their value in hospital wards and pediatrics. 
VisitU combines these 2 technologies innovatively. VisitU is a recently launched VR product enabling users to be virtually at home during hospitalization. Objective: This study aims to explore the experiences of hospitalized patients with the VR intervention of VisitU in addition to standard care. Methods: Over a 3-month period, a purposive sample of 10 patients hospitalized in the Radboudumc Amalia Children's Hospital was included in this qualitative study. Semistructured interviews were performed, one before and one after the use of the VR device. Patients were asked open-ended questions concerning their experiences with VisitU on practical, cognitive, emotional, and social domains. The interviews were audiorecorded and transcribed verbatim. Atlas.ti was used to support the qualitative analysis. Furthermore, the inductive thematic analysis was done according to the 6-step procedure described by Braun and Clarke. Results: The following 6 main themes were the result of the qualitative analysis: ``Being hospitalized,'' ``Expectations of VisitU,'' ``VisitU in use,'' ``VisitU, the benefits,'' ``The impact of VisitU,'' and ``Barriers when using VisitU.'' The way VisitU was used by patients varied. The main benefits of VisitU were being somewhere else, being at home, and facilitating social connection. Limitations were experienced on the technical abilities, physical side effects, and complexity of use. Despite that, patients were positive about VisitU and unanimous in the view that they would like to use it again and advise other patients to use it. Conclusions: This study shows the positive experiences of pediatric patients with VR live streaming. VisitU brings together the needs of patients and possibilities of VR and videoconferencing; it offers patients a way out of the hospital. Nevertheless, practical and technical obstacles must be overcome and side effects are an area of further research. ", doi="10.2196/pediatrics.9576", url="http://pediatrics.jmir.org/2018/2/e10/", url="http://www.ncbi.nlm.nih.gov/pubmed/31518293" } @Article{info:doi/10.2196/mhealth.9975, author="Piegari, Giuseppe and Iovane, Valentina and Carletti, Vincenzo and Fico, Rosario and Costagliola, Alessandro and De Biase, Davide and Prisco, Francesco and Paciello, Orlando", title="Assessment of Google Glass for Photographic Documentation in Veterinary Forensic Pathology: Usability Study", journal="JMIR Mhealth Uhealth", year="2018", month="Sep", day="21", volume="6", number="9", pages="e180", keywords="Google Glass", keywords="necropsy", keywords="pictures", keywords="documentation", keywords="veterinary forensic pathology", keywords="mobile phone", abstract="Background: Google Glass is a head-mounted device designed in the shape of a pair of eyeglasses equipped with a 5.0-megapixel integrated camera and capable of taking pictures with simple voice commands. Objective: The objective of our study was to determine whether Google Glass is fit for veterinary forensic pathology purposes. Methods: A total of 44 forensic necropsies of 2 different species (22 dogs and 22 cats) were performed by 2 pathologists; each pathologist conducted 11 necropsies of each species and, for each photographic acquisition, the images were taken with a Google Glass device and a Nikon D3200 digital single-lens reflex (DSLR) camera. The pictures were collected, divided into 3 groups (based on the external appearance of the animal, organs, and anatomical details), and evaluated by 5 forensic pathologists using a 5-point score system. 
The parameters assessed were overall color settings, region of interest, sharpness, and brightness. To evaluate the difference in mean duration between necropsies conducted with Google Glass and the DSLR camera and to assess the battery consumption of the devices, an additional 16 necropsies were performed by the 2 pathologists. In these cases, Google Glass was used for photographic reports in 8 cases (4 dogs and 4 cats) and a Nikon D3200 reflex camera in the other 8 cases. Statistical evaluations were performed to assess the differences in ratings between the quality of the images taken with both devices. Results: The images taken with Google Glass received significantly lower ratings than those acquired with the reflex camera for all 4 assessed parameters (P<.001). In particular, for the pictures of Groups A and B taken with Google Glass, the sum of frequency of ratings 5 (very good) and 4 (good) was between 50\% and 77\% for all 4 assessed parameters. The lowest ratings were observed for the pictures of Group C, with a sum of frequency of ratings 5 and 4 of 21.1\% (342/1602) for region of interest, 26\% (421/1602) for sharpness, 35.5\% (575/1602) for overall color settings, and 61.4\% (995/1602) for brightness. Furthermore, we found a significant reduction in the mean execution time for necropsies conducted with Google Glass compared with the reflex camera group (P<.001). However, Google Glass drained the battery very quickly. Conclusions: These findings suggest that Google Glass is usable in veterinary forensic pathology. In particular, the image quality of Groups A and B seemed adequate for forensic photographic documentation purposes, although the quality was lower than that with the reflex camera. However, at this stage of development, the high frequency of poor ratings observed for the pictures of Group C suggests that the device is not suitable for taking pictures of small anatomical details or close-ups of the injuries. ", doi="10.2196/mhealth.9975", url="http://mhealth.jmir.org/2018/9/e180/", url="http://www.ncbi.nlm.nih.gov/pubmed/30249586" } @Article{info:doi/10.2196/10762, author="Odenheimer, Sandra and Goyal, Deepika and Jones, Goel Veena and Rosenblum, Ruth and Ho, Lam and Chan, S. Albert", title="Patient Acceptance of Remote Scribing Powered by Google Glass in Outpatient Dermatology: Cross-Sectional Study", journal="J Med Internet Res", year="2018", month="Jun", day="21", volume="20", number="6", pages="e10762", keywords="acceptance, clinician burnout, communication, Google Glass, health care provider, patient, remote scribing, trust", abstract="Background: The ubiquitous use of electronic health records (EHRs) during medical office visits using a computer monitor and keyboard can be distracting and can disrupt patient-health care provider (HCP) nonverbal eye contact cues, which are integral to effective communication. Provider use of a remote medical scribe with face-mounted technology (FMT), such as Google Glass, may preserve patient-HCP communication dynamics in health care settings by allowing providers to maintain direct eye contact with their patients while still having access to the patient's relevant EHR information. The medical scribe is able to chart patient encounters in real time while working in an offsite location, document the visit directly into the EHR, and free the HCP to focus only on the patient. Objective: The purpose of this study was to examine patient perceptions of their interactions with an HCP who used FMT with a remote medical scribe during office visits. 
This includes an examination of any association between patient privacy and trust in their HCP when FMT is used in the medical office setting. Methods: For this descriptive, cross-sectional study, a convenience sample of patients was recruited from an outpatient dermatology clinic in Northern California. Participants provided demographic data and completed a 12-item questionnaire to assess their familiarity, comfort, privacy, and perceptions following routine office visits with an HCP where FMT was used to document the clinical encounter. Data were analyzed using appropriate descriptive and inferential statistics. Results: Over half of the 170 study participants were female (102/170, 59.4\%), 60.0\% were Caucasian (102/170), 24.1\% were Asian (41/170), and 88.8\% were college-educated (151/170). Age ranged between 18 and 90 years (mean 50.5, SD 17.4). The majority of participants (118/170, 69.4\%) were familiar with FMT, not concerned with privacy issues (132/170, 77.6\%), and stated that the use of FMT did not affect their trust in their HCP (139/170, 81.8\%). Moreover, participants comfortable with the use of FMT were less likely to be concerned about privacy (P<.001) and participants who trusted their HCP were less likely to be concerned about their HCP using Google Glass (P<.009). Almost one-third of them self-identified as early technology adopters (49/170, 28.8\%) and 87\% (148/170) preferred their HCP using FMT if it delivered better care. Conclusions: Our study findings support the patient acceptance of Google Glass use for outpatient dermatology visits. Future research should explore the use of FMT in other areas of health care and strive to include a socioeconomically diverse patient population in study samples. ", doi="10.2196/10762", url="http://www.jmir.org/2018/6/e10762/", url="http://www.ncbi.nlm.nih.gov/pubmed/29929947" } @Article{info:doi/10.2196/mental.9631, author="Vahabzadeh, Arshya and Keshav, U. Neha and Salisbury, P. Joseph and Sahin, T. Ned", title="Improvement of Attention-Deficit/Hyperactivity Disorder Symptoms in School-Aged Children, Adolescents, and Young Adults With Autism via a Digital Smartglasses-Based Socioemotional Coaching Aid: Short-Term, Uncontrolled Pilot Study", journal="JMIR Ment Health", year="2018", month="Apr", day="02", volume="5", number="2", pages="e25", keywords="autism spectrum disorder", keywords="Asperger syndrome", keywords="augmented reality", keywords="virtual reality", keywords="artificial intelligence", keywords="affective computing", keywords="patient education as a topic", keywords="ADHD", keywords="attention deficit disorder with hyperactivity", keywords="attention", keywords="smartglasses", abstract="Background: People with autism spectrum disorder (ASD) commonly experience symptoms related to attention-deficit/hyperactivity disorder (ADHD), including hyperactivity, inattention, and impulsivity. One-third of ASD cases may be complicated by the presence of ADHD. Individuals with dual diagnoses face greater barriers to accessing treatment for ADHD and respond less positively to primary pharmacologic interventions. Nonpharmacologic technology-aided tools for hyperactivity and inattention in people with ASD are being developed, although research into their efficacy and safety remains limited. 
Objective: The objective of this preliminary study was to describe the changes in ADHD-related symptoms in children, adolescents, and young adults with ASD immediately after use of the Empowered Brain system, a behavioral and social communication aid for ASD running on augmented reality smartglasses. Methods: We recruited 8 children, adolescents, and young adults with ASD (male to female ratio of 7:1, mean age 15 years, range 11.7-20.5 years) through a Web-based research signup form. The baseline score on the hyperactivity subscale of the Aberrant Behavior Checklist (ABC-H), a measure of hyperactivity, inattention, and impulsivity, determined their classification into a high ADHD-related symptom group (n=4, ABC-H $\geq$13) and a low ADHD-related symptom group (n=4, ABC-H<13). All participants received an intervention with Empowered Brain, where they used smartglasses-based social communication and behavioral modules while interacting with their caregiver. We then calculated caregiver-reported ABC-H scores at 24 and 48 hours after the session. Results: All 8 participants were able to complete the intervention session. Postintervention ABC-H scores were lower for most participants at 24 hours (n=6, 75\%) and for all participants at 48 hours (n=8, 100\%). At 24 hours after the session, average participant ABC-H scores decreased by 54.9\% in the high ADHD symptom group and by 20\% in the low ADHD symptom group. At 48 hours after the session, ABC-H scores compared with baseline decreased by 56.4\% in the high ADHD symptom group and by 66.3\% in the low ADHD symptom group. Conclusions: This study provides initial evidence for the potential of the Empowered Brain system to reduce ADHD-related symptoms, such as hyperactivity, inattention, and impulsivity, in school-aged children, adolescents, and young adults with ASD. This digital smartglasses intervention can potentially be targeted at a broader array of mental health conditions that exhibit transdiagnostic attentional and social communication deficits, including schizophrenia and bipolar disorder. Further research is required to understand the clinical importance of these observed changes and to conduct longitudinal studies on this intervention with control groups and larger sample sizes. ", doi="10.2196/mental.9631", url="http://mental.jmir.org/2018/2/e25/", url="http://www.ncbi.nlm.nih.gov/pubmed/29610109" } @Article{info:doi/10.2196/mhealth.9409, author="Wei, J. Nancy and Dougherty, Bryn and Myers, Aundria and Badawy, M. Sherif", title="Using Google Glass in Surgical Settings: Systematic Review", journal="JMIR Mhealth Uhealth", year="2018", month="Mar", day="06", volume="6", number="3", pages="e54", keywords="Google Glass", keywords="wearable", keywords="wearable device", keywords="head-mounted wearable device", keywords="surgery", keywords="surgical setting", keywords="surgical condition", abstract="Background: In recent years, wearable devices have become increasingly attractive and the health care industry has been especially drawn to Google Glass because of its ability to serve as a head-mounted wearable device. The use of Google Glass in surgical settings is of particular interest due to the hands-free device's potential to streamline workflow and maintain sterile conditions in an operating room environment. Objective: The aim is to conduct a systematic evaluation of the literature on the feasibility and acceptability of using Google Glass in surgical settings and to assess the potential benefits and limitations of its application. 
Methods: The literature was searched for articles published between January 2013 and May 2017. The search included the following databases: PubMed MEDLINE, Embase, Cumulative Index to Nursing and Allied Health Literature, PsycINFO (EBSCO), and IEEE Xplore. Two reviewers independently screened titles and abstracts and assessed full-text articles. Original research articles that evaluated the feasibility, usability, or acceptability of using Google Glass in surgical settings were included. This review was completed following the Preferred Reporting Items for Systematic Reviews and Meta-Analyses guidelines. Results: Of the 520 records obtained, 31 met all predefined criteria and were included in this review. Google Glass was used in various surgical specialties. Most studies were in the United States (23/31, 74\%) and all were conducted in hospital settings: 29 in adult hospitals (29/31, 94\%) and two in children's hospitals (2/31, 7\%). Sample sizes of participants who wore Google Glass ranged from 1 to 40. Of the 31 studies, 25 (81\%) were conducted under real-time conditions or actual clinical care settings, whereas the other six (19\%) were conducted in a simulated environment. Twenty-six studies were pilot or feasibility studies (84\%), three were case studies (10\%), and two were randomized controlled trials (6\%). The majority of studies examined the potential use of Google Glass as an intraoperative intervention (27/31, 87\%), whereas others observed its potential use in preoperative (4/31, 13\%) and postoperative settings (5/31, 16\%). Google Glass was utilized as a videography and photography device (21/31, 68\%), a vital sign monitor (6/31, 19\%), a surgical navigation display (5/31, 16\%), and as a videoconferencing tool to communicate with remote surgeons intraoperatively (5/31, 16\%). Most studies reported moderate or high acceptability of using Google Glass in surgical settings. The main reported limitations of Google Glass utilization were short battery life (8/31, 26\%) and difficulty with hands-free features (5/31, 16\%). Conclusions: There are promising feasibility and usability data on using Google Glass in surgical settings with particular benefits for surgical education and training. Despite existing technical limitations, Google Glass was generally well received and several studies in surgical settings acknowledged its potential for training, consultation, patient monitoring, and audiovisual recording. ", doi="10.2196/mhealth.9409", url="http://mhealth.jmir.org/2018/3/e54/", url="http://www.ncbi.nlm.nih.gov/pubmed/29510969" } @Article{info:doi/10.2196/mhealth.8478, author="Salisbury, P. Joseph and Keshav, U. Neha and Sossong, D. Anthony and Sahin, T. Ned", title="Concussion Assessment With Smartglasses: Validation Study of Balance Measurement Toward a Lightweight, Multimodal, Field-Ready Platform", journal="JMIR Mhealth Uhealth", year="2018", month="Jan", day="23", volume="6", number="1", pages="e15", keywords="postural balance", keywords="wearable technology", keywords="accelerometry", keywords="mild traumatic brain injury", abstract="Background: Lightweight and portable devices that objectively measure concussion-related impairments could improve injury detection and critical decision-making in contact sports and the military, where brain injuries commonly occur but remain underreported. Current standard assessments often rely heavily on subjective methods such as symptom self-reporting. 
Head-mounted wearables, such as smartglasses, provide an emerging platform for consideration that could deliver the range of assessments necessary to develop a rapid and objective screen for brain injury. Standing balance assessment, one parameter that may inform a concussion diagnosis, could theoretically be performed quantitatively using current off-the-shelf smartglasses with an internal accelerometer. However, the validity of balance measurement using smartglasses has not been investigated. Objective: This study aimed to perform preliminary validation of a smartglasses-based balance accelerometer measure (BAM) compared with the well-described and characterized waist-based BAM. Methods: Forty-two healthy individuals (26 male, 16 female; mean age 23.8 [SD 5.2] years) participated in the study. Following the BAM protocol, each subject performed 2 trials of 6 balance stances while accelerometer and gyroscope data were recorded from smartglasses (Glass Explorer Edition). Test-retest reliability and correlation were determined relative to waist-based BAM as used in the National Institutes of Health's Standing Balance Toolbox. Results: Balance measurements obtained using a head-mounted wearable were highly correlated with those obtained through a waist-mounted accelerometer (Spearman rho, $\rho$=.85). Test-retest reliability was high (intraclass correlation coefficient, ICC2,1=0.85, 95\% CI 0.81-0.88) and in good agreement with waist balance measurements (ICC2,1=0.84, 95\% CI 0.80-0.88). Considering the normalized path length magnitude across all 3 axes improved interdevice correlation ($\rho$=.90) while maintaining test-retest reliability (ICC2,1=0.87, 95\% CI 0.83-0.90). All subjects successfully completed the study, demonstrating the feasibility of using a head-mounted wearable to assess balance in a healthy population. Conclusions: Balance measurements derived from the smartglasses-based accelerometer were consistent with those obtained using a waist-mounted accelerometer. Additional research is necessary to determine to what extent smartglasses-based accelerometry measures can detect balance dysfunction associated with concussion. However, given the potential for smartglasses to perform additional concussion-related assessments in an integrated, wearable platform, continued development and validation of a smartglasses-based balance assessment is warranted. This approach could lead to a wearable platform for real-time assessment of concussion-related impairments that could be further augmented with telemedicine capabilities to integrate professional clinical guidance. Smartglasses may be superior to fully immersive virtual reality headsets for this application, given their lighter weight and reduced likelihood of potential safety concerns. ", doi="10.2196/mhealth.8478", url="http://mhealth.jmir.org/2018/1/e15/", url="http://www.ncbi.nlm.nih.gov/pubmed/29362210" } @Article{info:doi/10.2196/humanfactors.8785, author="Sahin, T. Ned and Keshav, U. Neha and Salisbury, P. 
Joseph and Vahabzadeh, Arshya", title="Second Version of Google Glass as a Wearable Socio-Affective Aid: Positive School Desirability, High Usability, and Theoretical Framework in a Sample of Children with Autism", journal="JMIR Hum Factors", year="2018", month="Jan", day="04", volume="5", number="1", pages="e1", keywords="autism", keywords="technology", keywords="digital health", keywords="augmented reality", keywords="virtual reality", keywords="smartglasses", keywords="usability", keywords="schools", keywords="education", keywords="classroom", keywords="IDEA", keywords="IEP", keywords="special education", abstract="Background: Computerized smartglasses are being developed as an assistive technology for daily activities in children and adults with autism spectrum disorder (ASD). While smartglasses may be able to help with educational and behavioral needs, their usability and acceptability in children with ASD is largely unknown. There have been reports of negative social perceptions surrounding smartglasses use in mainstream populations, a concern given that assistive technologies may already carry their own stigma. Children with ASD may also have a range of additional behavioral, developmental, and social challenges when asked to use this emerging technology in school and home settings. Objective: The usability and acceptability of Glass Enterprise Edition (Glass), the successor to Google Glass smartglasses, were explored in children with ASD and their caregivers. Methods: Eight children with ASD and their caregivers were recruited to attend a demonstration session with Glass smartglasses the week they were publicly released. The children had a wide range of ability, including limited speech to speaking, and represented a full range of school ages (6 to 17 years). Children and caregivers were interviewed about their experience of using the smartglasses and whether they would use them at school and home. Results: All 8 children succeeded in using Glass and did not feel stressed (8/8, 100\%) or experience any overwhelming sensory or emotional issues during the session (8/8, 100\%). All 8 children (8/8, 100\%) endorsed that they would be willing to wear and use the device in both home and school settings. Caregivers felt the experience was fun for the children (8/8, 100\%), and most caregivers felt the experience was better than they had expected (6/8, 75\%). Conclusions: A wide age and ability range of children with ASD used Glass immediately after it was released and found it to be usable and acceptable. Despite concerns about potential stigma or social acceptability, all of the children were prepared to use the technology in both home and school settings. Encouragingly, most caregivers noted a very positive response. There were no behavioral, developmental, or social- or stigma-related concerns during or after the session. Smartglasses may be a useful future technology for children with ASD and are readily accepted for use by children with ASD and their caregivers. 
", doi="10.2196/humanfactors.8785", url="http://humanfactors.jmir.org/2018/1/e1/", url="http://www.ncbi.nlm.nih.gov/pubmed/29301738" } @Article{info:doi/10.2196/mhealth.8470, author="Koh, Elizabeth Hyeseung and Oh, Jeeyun and Mackert, Michael", title="Predictors of Playing Augmented Reality Mobile Games While Walking Based on the Theory of Planned Behavior: Web-Based Survey", journal="JMIR Mhealth Uhealth", year="2017", month="Dec", day="11", volume="5", number="12", pages="e191", keywords="mobile phone", keywords="pedestrians", keywords="safety on the street", keywords="psychological models", keywords="predictive value of tests", keywords="intention", keywords="age factors", keywords="attitude", keywords="social norms", keywords="self-efficacy", keywords="habits", keywords="immersion", keywords="self-report", abstract="Background: There has been a sharp increase in the number of pedestrians injured while using a mobile phone, but little research has been conducted to explain how and why people use mobile devices while walking. Therefore, we conducted a survey study to explicate the motivations of mobile phone use while walking Objective: The purpose of this study was to identify the critical predictors of behavioral intention to play a popular mobile game, Pokemon Go, while walking, based on the theory of planned behavior (TPB). In addition to the three components of TPB, automaticity, immersion, and enjoyment were added to the model. This study is a theory-based investigation that explores the underlying mechanisms of mobile phone use while walking focusing on a mobile game behavior. Methods: Participants were recruited from a university (study 1; N=262) and Amazon Mechanical Turk (MTurk) (study 2; N=197) in the United States. Participants completed a Web-based questionnaire, which included measures of attitude, subjective norms, perceived behavioral control (PBC), automaticity, immersion, and enjoyment. Participants also answered questions regarding demographic items. Results: Hierarchical regression analyses were conducted to examine hypotheses. The model we tested explained about 41\% (study 1) and 63\% (study 2) of people's intention to play Pokemon Go while walking. The following 3 TPB variables were significant predictors of intention to play Pokemon Go while walking in study 1 and study 2: attitude (P<.001), subjective norms (P<.001), and PBC (P=.007 in study 1; P<.001 in study 2). Automaticity tendency (P<.001), immersion (P=.02), and enjoyment (P=.04) were significant predictors in study 1, whereas enjoyment was the only significant predictor in study 2 (P=.01). Conclusions: Findings from this study demonstrated the utility of TPB in predicting a new behavioral domain---mobile use while walking. To sum up, younger users who are habitual, impulsive, and less immersed players are more likely to intend to play a mobile game while walking. ", doi="10.2196/mhealth.8470", url="http://mhealth.jmir.org/2017/12/e191/", url="http://www.ncbi.nlm.nih.gov/pubmed/29229586" } @Article{info:doi/10.2196/mhealth.8671, author="Dougherty, Bryn and Badawy, M. 
Sherif", title="Using Google Glass in Nonsurgical Medical Settings: Systematic Review", journal="JMIR Mhealth Uhealth", year="2017", month="Oct", day="19", volume="5", number="10", pages="e159", keywords="Google Glass", keywords="wearable", keywords="wearable device", keywords="head-mounted wearable device", keywords="non-surgical setting", keywords="non-surgical condition", keywords="medical setting", keywords="medical condition", abstract="Background: Wearable technologies provide users hands-free access to computer functions and are becoming increasingly popular on both the consumer market and in various industries. The medical industry has pioneered research and implementation of head-mounted wearable devices, such as Google Glass. Most of this research has focused on surgical interventions; however, other medical fields have begun to explore the potential of this technology to support both patients and clinicians. Objective: Our aim was to systematically evaluate the feasibility, usability, and acceptability of using Google Glass in nonsurgical medical settings and to determine the benefits, limitations, and future directions of its application. Methods: This review covers literature published between January 2013 and May 2017. Searches included PubMed MEDLINE, Embase, INSPEC (Ebsco), Cochrane Central Register of Controlled Trials (CENTRAL), IEEE Explore, Web of Science, Scopus, and Compendex. The search strategy sought all articles on Google Glass. Two reviewers independently screened titles and abstracts, assessed full-text articles, and extracted data from articles that met all predefined criteria. Any disagreements were resolved by discussion or consultation by the senior author. Included studies were original research articles that evaluated the feasibility, usability, or acceptability of Google Glass in nonsurgical medical settings. The preferred reporting results of systematic reviews and meta-analyses (PRISMA) guidelines were followed for reporting of results. Results: Of the 852 records examined, 51 met all predefined criteria, including patient-centered (n=21) and clinician-centered studies (n=30). Patient-centered studies explored the utility of Google Glass in supporting patients with motor impairments (n=8), visual impairments (n=5), developmental and psychiatric disorders (n=2), weight management concerns (n=3), allergies (n=1), or other health concerns (n=2). Clinician-centered studies explored the utility of Google Glass in student training (n=9), disaster relief (n=4), diagnostics (n=2), nursing (n=1), autopsy and postmortem examination (n=1), wound care (n=1), behavioral sciences (n=1), and various medical subspecialties, including, cardiology (n=3), radiology (n=3), neurology (n=1), anesthesiology (n=1), pulmonology (n=1), toxicology (n=1), and dermatology (n=1). Most of the studies were conducted in the United States (40/51, 78\%), did not report specific age information for participants (38/51, 75\%), had sample size <30 participants (29/51, 57\%), and were pilot or feasibility studies (31/51, 61\%). Most patient-centered studies (19/21, 90\%) demonstrated feasibility with high satisfaction and acceptability among participants, despite a few technical challenges with the device. A number of clinician-centered studies (11/30, 37\%) reported low to moderate satisfaction among participants, with the most promising results being in the area of student training. Studies varied in sample size, approach for implementation of Google Glass, and outcomes assessment. 
Conclusions: The use of Google Glass in nonsurgical medical settings varied. More promising results regarding the feasibility, usability, and acceptability of using Google Glass were seen in patient-centered studies and student training settings. Further research evaluating the efficacy and cost-effectiveness of Google Glass as an intervention to improve important clinical outcomes is warranted. ", doi="10.2196/mhealth.8671", url="http://mhealth.jmir.org/2017/10/e159/", url="http://www.ncbi.nlm.nih.gov/pubmed/29051136" } @Article{info:doi/10.2196/mhealth.8534, author="Keshav, U. Neha and Salisbury, P. Joseph and Vahabzadeh, Arshya and Sahin, T. Ned", title="Social Communication Coaching Smartglasses: Well Tolerated in a Diverse Sample of Children and Adults With Autism", journal="JMIR Mhealth Uhealth", year="2017", month="Sep", day="21", volume="5", number="9", pages="e140", keywords="autism", keywords="tech", keywords="digital health", keywords="smartglasses", keywords="augmented reality", keywords="autism spectrum disorder", keywords="technology", keywords="medtech", keywords="education", abstract="Background: Augmented reality (AR) smartglasses are an emerging technology that is under investigation as a social communication aid for children and adults with autism spectrum disorder (ASD) and as a research tool to aid with digital phenotyping. Tolerability of this wearable technology in people with ASD is an important area for research, especially as these individuals may experience sensory, cognitive, and attentional challenges. Objective: The aim of this study was to assess the tolerability and usability of a novel smartglasses system that has been designed as a social communication aid for children and adults with autism (the Brain Power Autism System [BPAS]). BPAS runs on Google Glass Explorer Edition and other smartglasses, uses both AR and affective artificial intelligence, and helps users learn key social and emotional skills. Methods: A total of 21 children and adults with ASD across a spectrum of severity used BPAS for a coaching session. The user's tolerability to the smartglasses, user being able to wear the smartglasses for 1 minute (initial tolerability threshold), and user being able to wear the smartglasses for the entire duration of the coaching session (whole session tolerability threshold) were determined through caregiver report. Results: Of 21 users, 19 (91\%) demonstrated tolerability on all 3 measures. Caregivers reported 21 out of 21 users (100\%) as tolerating the experience, while study staff found only 19 out of 21 users managed to demonstrate initial tolerability (91\%). Of the 19 users who demonstrated initial tolerability, all 19 (100\%) were able to use the smartglasses for the entire session (whole session tolerability threshold). Caregivers reported that 19 out of 21 users (91\%) successfully used BPAS, and users surpassed caregiver expectations in 15 of 21 cases (71\%). Users who could communicate reported BPAS as being comfortable (94\%). Conclusions: This preliminary report suggests that BPAS is well tolerated and usable to a diverse age- and severity-range of people with ASD. This is encouraging as these devices are being developed as assistive technologies for people with ASD. Further research should focus on improving smartglasses design and exploring their efficacy in helping with social communication in children and adults with ASD. 
", doi="10.2196/mhealth.8534", url="http://mhealth.jmir.org/2017/9/e140/", url="http://www.ncbi.nlm.nih.gov/pubmed/28935618" } @Article{info:doi/10.2196/mhealth.7943, author="Noll, Christoph and von Jan, Ute and Raap, Ulrike and Albrecht, Urs-Vito", title="Mobile Augmented Reality as a Feature for Self-Oriented, Blended Learning in Medicine: Randomized Controlled Trial", journal="JMIR Mhealth Uhealth", year="2017", month="Sep", day="14", volume="5", number="9", pages="e139", keywords="problem-based learning", keywords="cellular phone", keywords="education", keywords="medical", keywords="mHealth", abstract="Background: Advantages of mobile Augmented Reality (mAR) application-based learning versus textbook-based learning were already shown in a previous study. However, it was unclear whether the augmented reality (AR) component was responsible for the success of the self-developed app or whether this was attributable to the novelty of using mobile technology for learning. Objective: The study's aim was to test the hypothesis whether there is no difference in learning success between learners who employed the mobile AR component and those who learned without it to determine possible effects of mAR. Also, we were interested in potential emotional effects of using this technology. Methods: Forty-four medical students (male: 25, female: 19, mean age: 22.25 years, standard deviation [SD]: 3.33 years) participated in this study. Baseline emotional status was evaluated using the Profile of Mood States (POMS) questionnaire. Dermatological knowledge was ascertained using a single choice (SC) test (10 questions). The students were randomly assigned to learn 45 min with either a mobile learning method with mAR (group A) or without AR (group B). Afterwards, both groups were again asked to complete the previous questionnaires. AttrakDiff 2 questionnaires were used to evaluate the perceived usability as well as pragmatic and hedonic qualities. For capturing longer term effects, after 14 days, all participants were again asked to complete the SC questionnaire. All evaluations were anonymous, and descriptive statistics were calculated. For hypothesis testing, an unpaired signed-rank test was applied. Results: For the SC tests, there were only minor differences, with both groups gaining knowledge (average improvement group A: 3.59 [SD 1.48]; group B: 3.86 [SD 1.51]). Differences between both groups were statistically insignificant (exact Mann Whitney U, U=173.5; P=.10; r=.247). However, in the follow-up SC test after 14 days, group A had retained more knowledge?(average decrease of the number of correct answers group A: 0.33 [SD 1.62]; group B: 1.14 [SD 1.30]). For both groups, descriptively, there were only small variations regarding emotional involvement, and learning experiences also differed little, with both groups rating the app similar for its stimulating effect. Conclusions: We were unable to show significant effects for mAR on the immediate learning success of the mobile learning setting. However, the similar level of stimulation being noted for both groups is inconsistent with the previous assumption of the success of mAR-based approach being solely attributable to the excitement of using mobile technology, independent of mAR; the mAR group showed some indications for a better long-term retention of knowledge. Further studies are needed to examine this aspect. Trial Registration: German Clinical Trials Register (DRKS): 00012980; http://www.drks.de/drks\_web/navigate.do? 
navigationId=trial.HTML\&TRIAL\_ID=DRKS00012980 (Archived by WebCite at http://www.webcitation.org/6tCWoM2Jb). ", doi="10.2196/mhealth.7943", url="http://mhealth.jmir.org/2017/9/e139/", url="http://www.ncbi.nlm.nih.gov/pubmed/28912113" } @Article{info:doi/10.2196/jmir.7379, author="Siebert, N. Johan and Ehrler, Frederic and Gervaix, Alain and Haddad, Kevin and Lacroix, Laurence and Schrurs, Philippe and Sahin, Ayhan and Lovis, Christian and Manzano, Sergio", title="Adherence to AHA Guidelines When Adapted for Augmented Reality Glasses for Assisted Pediatric Cardiopulmonary Resuscitation: A Randomized Controlled Trial", journal="J Med Internet Res", year="2017", month="May", day="29", volume="19", number="5", pages="e183", keywords="resuscitation", keywords="emergency medicine", keywords="pediatrics", keywords="biomedical technologies", keywords="equipment and supplies", keywords="eyeglasses", abstract="Background: The American Heart Association (AHA) guidelines for cardiopulmonary resuscitation (CPR) are nowadays recognized as the world's most authoritative resuscitation guidelines. Adherence to these guidelines optimizes the management of critically ill patients and increases their chances of survival after cardiac arrest. Despite their availability, suboptimal quality of CPR is still common. Currently, the median hospital survival rate after pediatric in-hospital cardiac arrest is 36\%, whereas it falls below 10\% for out-of-hospital cardiac arrest. Among emerging information technologies and devices able to support caregivers during resuscitation and increase adherence to AHA guidelines, augmented reality (AR) glasses have not yet been assessed. In order to assess their potential, we adapted AHA Pediatric Advanced Life Support (PALS) guidelines for AR glasses. Objective: The study aimed to determine whether adapting AHA guidelines for AR glasses increased adherence by reducing deviation and time to initiation of critical life-saving maneuvers during pediatric CPR when compared with the use of PALS pocket reference cards. Methods: We conducted a randomized controlled trial with two parallel groups of voluntary pediatric residents, comparing AR glasses to PALS pocket reference cards during a simulation-based pediatric cardiac arrest scenario---pulseless ventricular tachycardia (pVT). The primary outcome was the elapsed time in seconds in each allocation group, from onset of pVT to the first defibrillation attempt. Secondary outcomes were time elapsed to (1) initiation of chest compression, (2) subsequent defibrillation attempts, and (3) administration of drugs, as well as the time intervals between defibrillation attempts and drug doses, shock doses, and number of shocks. All these outcomes were assessed for deviation from AHA guidelines. Results: Twenty residents were randomized into 2 groups. Time to first defibrillation attempt (mean: 146 s) and adherence to AHA guidelines in terms of time to other critical resuscitation endpoints and drug dose delivery were not improved using AR glasses. However, errors and deviations were significantly reduced in terms of defibrillation doses when compared with the use of the PALS pocket reference cards. In a total of 40 defibrillation attempts, residents not wearing AR glasses used wrong doses in 65\% (26/40) of cases, including 21 shock overdoses >100 J, for a cumulative defibrillation dose of 18.7 Joules per kg. These errors were reduced by 53\% (21/40, P<.001) and cumulative defibrillation dose by 37\% (5.14/14, P=.001) with AR glasses. 
Conclusions: AR glasses did not decrease time to first defibrillation attempt and other critical resuscitation endpoints when compared with PALS pocket cards. However, they improved adherence and performance among residents in terms of administering the defibrillation doses set by AHA. ", doi="10.2196/jmir.7379", url="http://www.jmir.org/2017/5/e183/", url="http://www.ncbi.nlm.nih.gov/pubmed/28554878" } @Article{info:doi/10.2196/jmir.6686, author="Rouleau, Genevi{\`e}ve and Gagnon, Marie-Pierre and C{\^o}t{\'e}, Jos{\'e} and Payne-Gagnon, Julie and Hudson, Emilie and Dubois, Carl-Ardy", title="Impact of Information and Communication Technologies on Nursing Care: Results of an Overview of Systematic Reviews", journal="J Med Internet Res", year="2017", month="Apr", day="25", volume="19", number="4", pages="e122", keywords="information and communication technology", keywords="eHealth", keywords="telehealth", keywords="nursing care", keywords="review, overview of systematic review", abstract="Background: Information and communication technologies (ICTs) are becoming an impetus for quality health care delivery by nurses. The use of ICTs by nurses can impact their practice, modifying the ways in which they plan, provide, document, and review clinical care. Objective: An overview of systematic reviews was conducted to develop a broad picture of the dimensions and indicators of nursing care that have the potential to be influenced by the use of ICTs. Methods: Quantitative, mixed-method, and qualitative reviews that aimed to evaluate the influence of four eHealth domains (eg, management, computerized decision support systems [CDSSs], communication, and information systems) on nursing care were included. We used the nursing care performance framework (NCPF) as an extraction grid and analytical tool. This model illustrates how the interplay between nursing resources and the nursing services can produce changes in patient conditions. The primary outcomes included nurses' practice environment, nursing processes, professional satisfaction, and nursing-sensitive outcomes. The secondary outcomes included satisfaction or dissatisfaction with ICTs according to nurses' and patients' perspectives. Reviews published in English, French, or Spanish from January 1, 1995 to January 15, 2015, were considered. Results: A total of 5515 titles or abstracts were assessed for eligibility and full-text papers of 72 articles were retrieved for detailed evaluation. It was found that 22 reviews published between 2002 and 2015 met the eligibility criteria. Many nursing care themes (ie, indicators) were influenced by the use of ICTs, including time management; time spent on patient care; documentation time; information quality and access; quality of documentation; knowledge updating and utilization; nurse autonomy; intra and interprofessional collaboration; nurses' competencies and skills; nurse-patient relationship; assessment, care planning, and evaluation; teaching of patients and families; communication and care coordination; perspectives of the quality of care provided; nurses and patients satisfaction or dissatisfaction with ICTs; patient comfort and quality of life related to care; empowerment; and functional status. Conclusions: The findings led to the identification of 19 indicators related to nursing care that are impacted by the use of ICTs. To the best of our knowledge, this was the first attempt to apply NCPF in the ICTs' context. 
This broad representation could be kept in mind when planning and implementing emerging ICTs in health care settings. Trial Registration: PROSPERO International Prospective Register of Systematic Reviews: CRD42014014762; http://www.crd.york.ac.uk/PROSPERO/display\_record.asp?ID=CRD42014014762 (Archived by WebCite at http://www.webcitation.org/6pIhMLBZh) ", doi="10.2196/jmir.6686", url="http://www.jmir.org/2017/4/e122/", url="http://www.ncbi.nlm.nih.gov/pubmed/28442454" } @Article{info:doi/10.2196/jmir.6759, author="Althoff, Tim and White, W. Ryen and Horvitz, Eric", title="Influence of Pok{\'e}mon Go on Physical Activity: Study and Implications", journal="J Med Internet Res", year="2016", month="Dec", day="06", volume="18", number="12", pages="e315", keywords="physical activity", keywords="Pok{\'e}mon Go", keywords="mobile health", keywords="mHealth", keywords="wearable devices", keywords="mobile applications", keywords="games", keywords="exergames", keywords="public health", abstract="Background: Physical activity helps people maintain a healthy weight and reduces the risk for several chronic diseases. Although this knowledge is widely recognized, adults and children in many countries around the world do not get recommended amounts of physical activity. Although many interventions are found to be ineffective at increasing physical activity or reaching inactive populations, there have been anecdotal reports of increased physical activity due to novel mobile games that embed game play in the physical world. The most recent and salient example of such a game is Pok{\'e}mon Go, which has reportedly reached tens of millions of users in the United States and worldwide. Objective: The objective of this study was to quantify the impact of Pok{\'e}mon Go on physical activity. Methods: We study the effect of Pok{\'e}mon Go on physical activity through a combination of signals from large-scale corpora of wearable sensor data and search engine logs for 32,000 Microsoft Band users over a period of 3 months. Pok{\'e}mon Go players are identified through search engine queries, and physical activity is measured through accelerometers. Results: We find that Pok{\'e}mon Go leads to significant increases in physical activity over a period of 30 days, with particularly engaged users (ie, those making multiple search queries for details about game usage) increasing their activity by 1473 steps a day on average, a more than 25\% increase compared with their prior activity level (P<.001). In the short time span of the study, we estimate that Pok{\'e}mon Go has added a total of 144 billion steps to US physical activity. Furthermore, Pok{\'e}mon Go has been able to increase physical activity across men and women of all ages, weight status, and prior activity levels, showing that this form of game leads to increases in physical activity with significant implications for public health. In particular, we find that Pok{\'e}mon Go is able to reach low-activity populations, whereas all 4 leading mobile health apps studied in this work largely draw from an already very active population. Conclusions: Mobile apps combining game play with physical activity lead to substantial short-term activity increases and, in contrast to many existing interventions and mobile health apps, have the potential to reach activity-poor populations. Future studies are needed to investigate potential long-term effects of these applications. 
", doi="10.2196/jmir.6759", url="http://www.jmir.org/2016/12/e315/", url="http://www.ncbi.nlm.nih.gov/pubmed/27923778" } @Article{info:doi/10.2196/mededu.5159, author="Chaballout, Basil and Molloy, Margory and Vaughn, Jacqueline and Brisson III, Raymond and Shaw, Ryan", title="Feasibility of Augmented Reality in Clinical Simulations: Using Google Glass With Manikins", journal="JMIR Medical Education", year="2016", month="Mar", day="07", volume="2", number="1", pages="e2", keywords="clinical simulation", keywords="augmented reality", keywords="feasibility", keywords="student learning", keywords="Google Glass", abstract="Background: Studies show that students who use fidelity-based simulation technology perform better and have higher retention rates than peers who learn in traditional paper-based training. Augmented reality is increasingly being used as a teaching and learning tool in a continual effort to make simulations more realistic for students. Objective: The aim of this project was to assess the feasibility and acceptability of using augmented reality via Google Glass during clinical simulation scenarios for training health science students. Methods: Students performed a clinical simulation while watching a video through Google Glass of a patient actor simulating respiratory distress. Following participation in the scenarios students completed two surveys and were questioned if they would recommend continued use of this technology in clinical simulation experiences. Results: We were able to have students watch a video in their field of vision of a patient who mimicked the simulated manikin. Students were overall positive about the implications for being able to view a patient during the simulations, and most students recommended using the technology in the future. Overall, students reported perceived realism with augmented reality using Google Glass. However, there were technical and usability challenges with the device. Conclusions: As newer portable and consumer-focused technologies become available, augmented reality is increasingly being used as a teaching and learning tool to make clinical simulations more realistic for health science students. We found Google Glass feasible and acceptable as a tool for augmented reality in clinical simulations. ", doi="10.2196/mededu.5159", url="http://mededu.jmir.org/2016/1/e2/", url="http://www.ncbi.nlm.nih.gov/pubmed/27731862" } @Article{info:doi/10.2196/mededu.4443, author="Zhu, Egui and Lilienthal, Anneliese and Shluzas, Aquino Lauren and Masiello, Italo and Zary, Nabil", title="Design of Mobile Augmented Reality in Health Care Education: A Theory-Driven Framework", journal="JMIR Medical Education", year="2015", month="Sep", day="18", volume="1", number="2", pages="e10", keywords="augmented reality", keywords="health care education", keywords="antibiotics", keywords="general practitioners", keywords="learning environment", keywords="learning theory", keywords="mobile technology", abstract="Background: Augmented reality (AR) is increasingly used across a range of subject areas in health care education as health care settings partner to bridge the gap between knowledge and practice. As the first contact with patients, general practitioners (GPs) are important in the battle against a global health threat, the spread of antibiotic resistance. AR has potential as a practical tool for GPs to combine learning and practice in the rational use of antibiotics. 
Objective: This paper was driven by learning theory to develop a mobile augmented reality education (MARE) design framework. The primary goal of the framework is to guide the development of AR educational apps. This study focuses on (1) identifying suitable learning theories for guiding the design of AR education apps, (2) integrating learning outcomes and learning theories to support health care education through AR, and (3) applying the design framework in the context of improving GPs' rational use of antibiotics. Methods: The design framework was first constructed with the conceptual framework analysis method. Data were collected from multidisciplinary publications and reference materials and were analyzed with directed content analysis to identify key concepts and their relationships. Then the design framework was applied to a health care educational challenge. Results: The proposed MARE framework consists of three hierarchical layers: the foundation, function, and outcome layers. Three learning theories---situated, experiential, and transformative learning---provide foundational support based on differing views of the relationships among learning, practice, and the environment. The function layer depends upon the learners' personal paradigms and indicates how health care learning could be achieved with MARE. The outcome layer analyzes different learning abilities, from knowledge to the practice level, to clarify learning objectives and expectations and to avoid teaching pitched at the wrong level. Suggestions for learning activities and the requirements of the learning environment form the foundation for AR to fill the gap between learning outcomes and medical learners' personal paradigms. With the design framework, the expected rational use of antibiotics by GPs is described and is easy to execute and evaluate. The comparison of specific expected abilities with the GP personal paradigm helps solidify the GP practical learning objectives and helps design the learning environment and activities. The learning environment and activities were supported by learning theories. Conclusions: This paper describes a framework for guiding the design, development, and application of mobile AR for medical education in the health care setting. The framework is theory driven with an understanding of the characteristics of AR and specific medical disciplines toward helping medical education improve professional development from knowledge to practice. Future research will use the framework as a guide for developing AR apps in practice to validate and improve the design framework. ", doi="10.2196/mededu.4443", url="http://mededu.jmir.org/2015/2/e10/", url="http://www.ncbi.nlm.nih.gov/pubmed/27731839" } @Article{info:doi/10.2196/jmir.3225, author="Albrecht, Urs-Vito and von Jan, Ute and Kuebler, Joachim and Zoeller, Christoph and Lacher, Martin and Muensterer, J. Oliver and Ettinger, Max and Klintschar, Michael and Hagemeier, Lars", title="Google Glass for Documentation of Medical Findings: Evaluation in Forensic Medicine", journal="J Med Internet Res", year="2014", month="Feb", day="12", volume="16", number="2", pages="e53", keywords="Google Glass", keywords="forensic medicine", keywords="autopsy, postmortem examination", keywords="documentation", abstract="Background: Google Glass is a promising premarket device that includes an optical head-mounted display. Several proof of concept reports exist, but there is little scientific evidence regarding its use in a medical setting. 
Objective: The objective of this study was to empirically determine the feasibility of deploying Glass in a forensics setting. Methods: Glass was used in combination with a self-developed app that allowed for hands-free operation during autopsy and postmortem examinations of 4 decedents performed by 2 physicians. A digital single-lens reflex (DSLR) camera was used for image comparison. In addition, 6 forensic examiners (3 male, 3 female; age range 23-48 years, age mean 32.8 years, SD 9.6; mean work experience 6.2 years, SD 8.5) were asked to evaluate 159 images for image quality on a 5-point Likert scale, specifically color discrimination, brightness, sharpness, and their satisfaction with the acquired region of interest. Statistical evaluations were performed to determine how Glass compares with conventionally acquired digital images. Results: All images received good (median 4) and very good ratings (median 5) for all 4 categories. Autopsy images taken by Glass (n=32) received significantly lower ratings than those acquired by DSLR camera (n=17) (region of interest: z=--5.154, P<.001; sharpness: z=--7.898, P<.001; color: z=--4.407, P<.001; brightness: z=--3.187, P=.001). For 110 images of postmortem examinations (Glass: n=54, DSLR camera: n=56), ratings for region of interest (z=--8.390, P<.001) and brightness (z=--540, P=.007) were significantly lower. For interrater reliability, intraclass correlation (ICC) values were good for autopsy (ICC=.723, 95\% CI .667-.771, P<.001) and postmortem examination (ICC=.758, 95\% CI .727-.787, P<.001). Postmortem examinations performed using Glass took 42.6 seconds longer than those done with the DSLR camera (z=--2.100, P=.04 using Wilcoxon signed rank test). The battery charge of Glass quickly decreased; an average of 5.5\% (SD 1.85) of its battery capacity was spent per postmortem examination (0.81\% per minute or 0.79\% per picture). Conclusions: Glass was efficient for acquiring images for documentation in forensic medicine, but the image quality was inferior to that of a DSLR camera. Images taken with Glass received significantly lower ratings for all 4 categories in an autopsy setting and for region of interest and brightness in postmortem examination. The effort necessary for achieving the objectives was higher when using the device compared to the DSLR camera, thus extending the postmortem examination duration. Its relatively high power consumption and low battery capacity are also disadvantages. At the current stage of development, Glass may be an adequate tool for education. For deployment in clinical care, issues such as hygiene, data protection, and privacy need to be addressed and are currently limiting chances for professional use. ", doi="10.2196/jmir.3225", url="http://www.jmir.org/2014/2/e53/", url="http://www.ncbi.nlm.nih.gov/pubmed/24521935" } @Article{info:doi/10.2196/jmir.2497, author="Albrecht, Urs-Vito and Folta-Schoofs, Kristian and Behrends, Marianne and von Jan, Ute", title="Effects of Mobile Augmented Reality Learning Compared to Textbook Learning on Medical Students: Randomized Controlled Pilot Study", journal="J Med Internet Res", year="2013", month="Aug", day="20", volume="15", number="8", pages="e182", keywords="problem-based learning", keywords="cellular phone", keywords="education", keywords="medical", keywords="emotions", abstract="Background: By adding new levels of experience, mobile Augmented Reality (mAR) can significantly increase the attractiveness of mobile learning applications in medical education. 
Objective: To compare the impact on learners of the heightened realism of a self-developed mAR blended learning environment (mARble) with that of textbook material, especially for ethically sensitive subjects such as forensic medicine, while taking into account basic psychological aspects (usability and higher level of emotional involvement) as well as learning outcomes (increased learning efficiency). Methods: A prestudy was conducted based on a convenience sample of 10 third-year medical students. The initial emotional status was captured using the ``Profile of Mood States'' questionnaire (POMS, German variation); previous knowledge about forensic medicine was determined using a 10-item single-choice (SC) test. During the 30-minute learning period, the students were randomized into two groups: the first group consisted of pairs of students, each equipped with one iPhone with a preinstalled copy of mARble, while the second group was provided with textbook material. Subsequently, both groups were asked to once again complete the POMS questionnaire and SC test to measure changes in emotional state and knowledge gain. Usability as well as pragmatic and hedonic qualities of the learning material was captured using AttrakDiff2 questionnaires. Data evaluation was conducted anonymously. Descriptive statistics for the score in total and the subgroups were calculated before and after the intervention. The scores of both groups were tested against each other using paired and unpaired signed-rank tests. An item analysis was performed for the SC test to objectify difficulty and selectivity. Results: The mARble group (6/10) showed significantly greater knowledge gain than the control group (4/10) (Wilcoxon z=2.232, P=.03). The item analysis of the SC test showed a difficulty of P=0.768 (s=0.09) and a selectivity of RPB=0.2. For mARble, fatigue (z=2.214, P=.03) and numbness (z=2.07, P=.04) decreased with statistical significance when comparing pre- and post-tests. Vigor rose slightly, while irritability did not increase significantly. Changes in the control group were insignificant. Regarding hedonic quality (identification, stimulation, attractiveness), there were significant differences between mARble (mean 1.179, CI --0.440 to 0.440) and the book chapter (mean --0.982, CI --0.959 to 0.959); the pragmatic quality mean only differed slightly. Conclusions: The mARble group performed considerably better regarding learning efficiency; there are hints for activating components of the mAR concept that may serve to fascinate the participants and possibly boost interest in the topic for the remainder of the class. While the small sample size reduces our study's conclusiveness, its design seems appropriate for determining the effects of interactive eLearning material with respect to emotions, learning efficiency, and hedonic and pragmatic qualities using a larger group. Trial Registration: German Clinical Trial Register (DRKS), DRKS-ID: DRKS00004685; https://drks-neu.uniklinik-freiburg.de/drks\_web/navigate.do?navigationId=trial.HTML\&TRIAL\_ID=DRKS00004685. ", doi="10.2196/jmir.2497", url="http://www.jmir.org/2013/8/e182/", url="http://www.ncbi.nlm.nih.gov/pubmed/23963306" }