<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Mhealth Uhealth</journal-id><journal-id journal-id-type="publisher-id">mhealth</journal-id><journal-id journal-id-type="index">13</journal-id><journal-title>JMIR mHealth and uHealth</journal-title><abbrev-journal-title>JMIR Mhealth Uhealth</abbrev-journal-title><issn pub-type="epub">2291-5222</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v14i1e82412</article-id><article-id pub-id-type="doi">10.2196/82412</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Evaluation of Smartphone Camera Positioning on Artificial Intelligence Pose Estimation Accuracy for Exercise Detection: Observational Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Oliosi</surname><given-names>Eduarda</given-names></name><degrees>BS, MS</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ferreira</surname><given-names>Soraia</given-names></name><degrees>MS, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Giordano</surname><given-names>Ana Paula</given-names></name><degrees>MS, 
PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff6">6</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Viveiros</surname><given-names>Guilherme</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff7">7</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Parraca</surname><given-names>Jos&#x00E9;</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Pereira</surname><given-names>Paulo</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Guede-Fern&#x00E1;ndez</surname><given-names>Federico</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Azevedo</surname><given-names>Salom&#x00E9;</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff7">7</xref><xref ref-type="aff" rid="aff8">8</xref><xref ref-type="aff" rid="aff9">9</xref></contrib></contrib-group><aff id="aff1"><institution>Value for Health CoLAB</institution><addr-line>15 Fontes Pereira de Melo Ave, 2nd Fl, Right</addr-line><addr-line>Lisbon</addr-line><country>Portugal</country></aff><aff id="aff2"><institution>Laboratory for Instrumentation, Biomedical Engineering and Radiation Physics (LIBPhys&#x2011;UNL), Physics Department, Nova School of Science and Technology, Nova University of Lisbon</institution><addr-line>Caparica</addr-line><country>Portugal</country></aff><aff id="aff3"><institution>Research Centre for Physical Activity, Health and Leisure (CIAFEL), Faculty of Sport, University of 
Porto</institution><addr-line>Porto</addr-line><country>Portugal</country></aff><aff id="aff4"><institution>Departamento de Desporto e Sa&#x00FA;de, Escola de Sa&#x00FA;de e Desenvolvimento Humano, Universidade de &#x00C9;vora</institution><addr-line>&#x00C9;vora</addr-line><country>Portugal</country></aff><aff id="aff5"><institution>Comprehensive Health Research Centre (CHRC), Escola de Sa&#x00FA;de e Desenvolvimento Humano, Departamento de Ci&#x00EA;ncias M&#x00E9;dicas, Universidade de &#x00C9;vora</institution><addr-line>&#x00C9;vora</addr-line><country>Portugal</country></aff><aff id="aff6"><institution>Research Unit in Business and Economics, Cat&#x00F3;lica Lisbon School of Business and Economics (CUBE), Catholic University of Portugal</institution><addr-line>Lisbon</addr-line><country>Portugal</country></aff><aff id="aff7"><institution>Department of Machine Learning, Dotmoovs</institution><addr-line>Braga</addr-line><country>Portugal</country></aff><aff id="aff8"><institution>Department of Engineering and Management, Instituto Superior T&#x00E9;cnico (CEG&#x2011;IST), University of Lisbon</institution><addr-line>Lisbon</addr-line><country>Portugal</country></aff><aff id="aff9"><institution>Comprehensive Health Research Center (CHRC), Nova Medical School, Nova University of Lisbon</institution><addr-line>Lisbon</addr-line><country>Portugal</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Buis</surname><given-names>Lorraine</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Morales-Rosales</surname><given-names>Luis Alberto</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Sasatani</surname><given-names>Takuya</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Eduarda Oliosi, BS, MS, Value for Health CoLAB, 15 Fontes Pereira de Melo Ave, 2nd Fl, 
Right, Lisbon, 1050&#x2011;115, Portugal, 351 937091767; <email>eduarda.oliosi@vohcolab.org</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>5</day><month>3</month><year>2026</year></pub-date><volume>14</volume><elocation-id>e82412</elocation-id><history><date date-type="received"><day>14</day><month>08</month><year>2025</year></date><date date-type="rev-recd"><day>04</day><month>11</month><year>2025</year></date><date date-type="accepted"><day>27</day><month>01</month><year>2026</year></date></history><copyright-statement>&#x00A9; Eduarda Oliosi, Soraia Ferreira, Ana Paula Giordano, Guilherme Viveiros, Jos&#x00E9; Parraca, Paulo Pereira, Federico Guede-Fern&#x00E1;ndez, Salom&#x00E9; Azevedo. Originally published in JMIR mHealth and uHealth (<ext-link ext-link-type="uri" xlink:href="https://mhealth.jmir.org">https://mhealth.jmir.org</ext-link>), 5.3.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR mHealth and uHealth, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mhealth.jmir.org/">https://mhealth.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mhealth.jmir.org/2026/1/e82412"/><abstract><sec><title>Background</title><p>Artificial intelligence (AI)&#x2013;driven pose estimation (PE) offers a scalable and cost-effective solution to track exercises in mobile health apps. However, occlusion, influenced by camera angle and distance, can reduce detection accuracy and repetition counting precision. The influence of smartphone positioning on these performance metrics remains underexplored in controlled studies.</p></sec><sec><title>Objective</title><p>The study aimed to examine how smartphone camera angle (front, side, and diagonal) and distance (90 cm, 180 cm, 200 cm, and 360 cm) affect detection performance and repetition counting accuracy during push-ups and squats using AI-based PE.</p></sec><sec sec-type="methods"><title>Methods</title><p>In this cross-sectional, within-subject study, 44 healthy university students (9 [20.5%] female participants; mean age 20.3 y, SD 0.4 y; mean BMI 23.2, SD 0.6 kg/m&#x00B2;) were assigned to perform either squats or push-ups. Each participant completed their assigned exercise across 12 predefined smartphone camera configurations, yielding approximately 264 squat trials (n=22) and 264 push-up trials (n=22). Each trial consisted of an average of 5 repetitions, totaling approximately 1320 repetitions per exercise. PE performance was assessed using binary classification accuracy, detection rate, and mean absolute error (MAE) for repetition counting. 
Generalized linear mixed-effects models evaluated classification odds, linear mixed-effects models analyzed MAE, and Tukey-adjusted post hoc tests followed significant effects.</p></sec><sec sec-type="results"><title>Results</title><p>The mean detection rate was 61.1% (SD 48.8%) for push-ups and 61.5% (SD 48.7%) for squats, with MAEs of 1.08 (SD 1.78) and 1.11 (SD 1.82) repetitions, respectively. Push-ups were most accurately detected from diagonal views at 90 to 180 cm (up to 85.7% detection; MAE=0.28) and least accurately from the front at 360 cm (20%; MAE=2.70). Squats performed best from a diagonal view at 200 cm (95.5%; MAE=0.05) and worst from the side at 90 cm (0%; MAE=5). Generalized linear mixed models showed that for push-ups, the front 90 cm and diagonal 360 cm views significantly reduced classification odds compared to the side 90 cm view (<italic>P</italic>=.03 and <italic>P</italic>=.04, respectively), whereas for squats, diagonal and front views significantly outperformed side views across all distances (<italic>P</italic>&#x003C;.001). Post hoc tests confirmed that for push-ups, diagonal close or mid-range views had significantly lower MAEs than far front views, and for squats, diagonal and front views at 180 to 200 cm achieved the highest accuracy and lowest MAEs (<italic>P</italic>&#x003C;.05).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>AI-based PE effectiveness for exercise tracking is significantly affected by smartphone positioning. Diagonal and frontal views at mid-range distances (180&#x2010;200 cm) provided the highest detection accuracy and counting precision. 
These findings offer actionable guidance for developers, clinicians, coaches, and users optimizing mobile health exercise monitoring.</p></sec></abstract><kwd-group><kwd>computer vision</kwd><kwd>digital health</kwd><kwd>human pose estimation</kwd><kwd>human activity recognition</kwd><kwd>mobile health</kwd><kwd>mhealth</kwd><kwd>mobile apps</kwd><kwd>physical activity</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Physical inactivity is a major global public health concern, contributing to higher rates of chronic diseases, reduced quality of life, and increased health care costs [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. While the health benefits of regular physical activity are well documented, ranging from improved cardiovascular fitness and muscle strength to enhanced mental well-being, participation levels remain insufficient worldwide [<xref ref-type="bibr" rid="ref4">4</xref>]. Current estimates indicate that over 31% of adults and 81% of adolescents fail to meet the World Health Organization&#x2019;s recommended physical activity guidelines [<xref ref-type="bibr" rid="ref5">5</xref>]. The consequences of physical inactivity are particularly severe for populations managing chronic or neurological conditions, for whom movement-based interventions can serve as both preventive and therapeutic measures [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>In response to the demand for scalable and accessible physical activity tools, mobile health (mHealth) apps have emerged as a cost-effective means of promoting exercise participation [<xref ref-type="bibr" rid="ref3">3</xref>]. 
A notable innovation in this field is pose estimation (PE), or markerless motion capture, which uses computer vision to detect joint positions from standard video footage without the need for specialized equipment [<xref ref-type="bibr" rid="ref8">8</xref>]. Open-source PE models, such as OpenPose (Carnegie Mellon University), BlazePose (Google), and MoveNet (Google), facilitate real-time PE via smartphones or webcams. This enables the provision of remote, personalized feedback [<xref ref-type="bibr" rid="ref9">9</xref>-<xref ref-type="bibr" rid="ref13">13</xref>]. In biomedical contexts, PE can identify movement impairment patterns and support rehabilitation or neurological diagnoses [<xref ref-type="bibr" rid="ref8">8</xref>]. It is also widely applied in fitness, telerehabilitation, occupational health, and sports to track joint angles, velocity, form, and repetitions [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref18">18</xref>]. Validation studies have demonstrated strong agreement with the gold standard motion capture [<xref ref-type="bibr" rid="ref19">19</xref>-<xref ref-type="bibr" rid="ref21">21</xref>], and recent studies integrating deep learning have reported repetition-counting accuracies above 90% [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref25">25</xref>].</p><p>Despite recent advances, most PE systems have only been evaluated in controlled laboratory environments. However, real-world mHealth apps occur in uncontrolled settings where factors such as handheld devices held at different distances and angles, and partial obstructions (eg, from pets, furniture, or body position) can significantly degrade performance [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]. 
These challenges are particularly pronounced for dynamic, whole-body movements such as squats and push-ups, where tracking accuracy can vary depending on body position and elevation. Although a few commercial solutions have begun to experiment with camera-based self-assessment (eg, Halo Movement [Amazon] [<xref ref-type="bibr" rid="ref27">27</xref>] and Kaia Health GmbH [<xref ref-type="bibr" rid="ref28">28</xref>]), there is still a lack of empirical studies on how smartphone positioning affects the reliability of PE in practical settings [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. Improving accuracy in these contexts will likely require advances not only in 2D keypoint detection but also in our understanding of optimal camera placement and how to transform 2D keypoint data into accurate spatial representations.</p><p>This study systematically evaluates the impact of smartphone camera angle and distance on the performance of artificial intelligence (AI)&#x2013;based PE when counting 2 foundational bodyweight exercises: squats and push-ups. These exercises are commonly used for fitness and rehabilitation tracking. They involve multijoint movements and present common occlusion challenges. Repetition counts are obtained from 2D PE across 12 camera configurations (3 angles &#x00D7; 4 distances) and benchmarked against expert-labeled ground truth. The aim is to examine how smartphone recording position, specifically camera angle and distance, affects detection performance and repetition counting accuracy, and to inform best practices for using mHealth systems in real-world environments. This work is part of the Blockchain.PT initiative (Project No. 
51), which supports sustainable blockchain-based digital innovation in the health and sports sectors.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>This cross-sectional, within-subject study was designed to assess the influence of smartphone camera configurations, specifically angle and distance, on the detection of PE during squats and push-ups. These exercises were selected due to their complex multijoint biomechanics and typical self-occlusions (eg, torso obscuring limbs in push-ups).</p></sec><sec id="s2-2"><title>Setting</title><p>All data were collected in a controlled laboratory environment at the School of Health and Human Development, University of &#x00C9;vora, Portugal, in May 2025. Environmental conditions, including lighting and temperature, were standardized across trials. Smartphone placement was controlled at predefined angles and distances to ensure reproducible video capture. The laboratory provided enough space for participants to safely perform push-ups and squats.</p></sec><sec id="s2-3"><title>Participants</title><p>Participants were recruited from undergraduate courses at the University of &#x00C9;vora in Portugal through a targeted study announcement that specified the eligibility criteria, study procedures, and recommended attire for physical exercise during classes. Eligible volunteers were identified through convenience sampling with eligibility screening.</p><p>A total of 44 healthy university students participated and self-reported their status. 
Inclusion criteria were: (1) aged 18 years or over; (2) able to safely perform squats and push-ups; (3) not having used sedative or balance-impairing medications within the last 24 hours; and (4) absence of neurological or musculoskeletal conditions that could impair movement.</p></sec><sec id="s2-4"><title>Ethical Considerations</title><p>The study protocol was reviewed and approved by the Ethics Committee of the University of &#x00C9;vora (GD/27378/2024), in accordance with institutional and international standards for human participant research. All procedures complied with the Declaration of Helsinki and the General Data Protection Regulation. Written informed consent was obtained from all participants prior to enrollment. Participants were informed about the study objectives, procedures, and potential risks and were explicitly advised of their right to withdraw at any time without penalty. To ensure privacy and confidentiality, all collected data were anonymized and coded to prevent participant identification. Data were securely stored on password-protected institutional servers accessible only to the research team. No monetary or material compensation was provided for participation in this study.</p></sec><sec id="s2-5"><title>Exercise Protocol and Dataset</title><p>Participants were first briefed on the study protocol and completed a demographic questionnaire to record baseline characteristics. For the squat task, participants stood with feet shoulder-width apart and arms extended forward, performing repetitions by lowering their hips until their thighs were parallel to the floor. Male participants performed push-ups in a standard plank position, while female participants were allowed a modified knee-supported variation. 
All exercise procedures adhered to the 11th edition of the <italic>American College of Sports Medicine</italic> guidelines (2021) [<xref ref-type="bibr" rid="ref29">29</xref>], which recommend these variations to accommodate differences in strength and fitness levels. To reduce fatigue-related performance decline and ensure high-quality movement data, participants were assigned to perform either squats or push-ups based on personal preference. Of the 44 participants, 22 (16 male participants and 6 female participants) completed the squat protocol, and 22 (19 male participants and 3 female participants) completed the push-up protocol. Each participant performed their assigned exercise across all 12 predefined camera configurations, yielding 264 squat trials (22&#x00D7;12) and 264 push-up trials (22&#x00D7;12). Each trial consisted of a continuous movement sequence, averaging approximately 5 repetitions per video [<xref ref-type="bibr" rid="ref16">16</xref>], totaling approximately 1320 repetitions per exercise type (264 trials &#x00D7; ~5 reps).</p></sec><sec id="s2-6"><title>Experimental Setup</title><p>To capture a representative sample of commonly used consumer devices, exercise sessions were recorded using three smartphone models, iPhone 11 (Apple Inc.), iPhone 13 (Apple Inc.), and Samsung Galaxy A52 (Samsung Electronics), each capturing video at 1080p resolution and 30 frames per second. Smartphones were positioned horizontally on the floor at 3 fixed angles relative to the participant&#x2019;s body: frontal (0&#x00B0;), diagonal (45&#x00B0;), and lateral (90&#x00B0;). Recordings were conducted at 4 distances, including 180 cm based on prior literature [<xref ref-type="bibr" rid="ref30">30</xref>], and 3 additional distances (90 cm, 200 cm, and 360 cm) defined by the research team to test a wider range of realistic recording conditions. This configuration resulted in 12 distinct camera setups (3 angles &#x00D7; 4 distances). 
All smartphone positions were marked and standardized to ensure consistency across trials.</p><p>This experimental setup is contextualized within existing research by <xref ref-type="table" rid="table1">Table 1</xref>, which summarizes previous studies on push-up and squat detection, detailing camera perspectives, distances, PE methods, and reported performance metrics. The comparison shows that most prior studies rely on limited viewpoints and fixed distances, restricting generalizability. In contrast, our multiangle, multidistance dataset addresses these constraints, providing a more diverse and representative resource for evaluating PE performance.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Summary of state-of-the-art push-up and squat pose estimation datasets and methods.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Reference</td><td align="left" valign="bottom">Exercise</td><td align="left" valign="bottom">Camera perspectives</td><td align="left" valign="bottom">Camera distances</td><td align="left" valign="bottom">PE<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> method</td><td align="left" valign="bottom">Instrument</td><td align="left" valign="bottom">Key evaluation metrics</td><td align="left" valign="bottom">Dataset</td></tr></thead><tbody><tr><td align="left" valign="top">Park et al [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">Push-up</td><td align="left" valign="top">Frontal, side</td><td align="left" valign="top">Full body visible, N/R<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup> value</td><td align="left" valign="top">OpenPose</td><td align="left" valign="top">2 cameras</td><td align="left" valign="top">ACC<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup>=90%</td><td align="left" valign="top">Custom dataset of<break/>n=12</td></tr><tr><td align="left" valign="top">Youssef et al [<xref 
ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Squat</td><td align="left" valign="top">Frontal, side</td><td align="left" valign="top">N/R</td><td align="left" valign="top">BlazePose</td><td align="left" valign="top">Mobile devices + inertial sensors</td><td align="left" valign="top">ACC=94%</td><td align="left" valign="top">EJUST-SQUAT-21<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup>, single individual, MM-Fit<sup><xref ref-type="table-fn" rid="table1fn5">e</xref></sup> datasets</td></tr><tr><td align="left" valign="top">Hande et al [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">Squat and others</td><td align="left" valign="top">Frontal, side</td><td align="left" valign="top">Full body visible, N/R value</td><td align="left" valign="top">OpenPose, MobileNet (Google), InceptionV3 (Google)</td><td align="left" valign="top">Single camera</td><td align="left" valign="top">ACC =~98% (MobileNet),~96% (InceptionV3)</td><td align="left" valign="top">Penn Action Dataset</td></tr><tr><td align="left" valign="top">Chae et al [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">Squat</td><td align="left" valign="top">Frontal, diagonal</td><td align="left" valign="top">250 cm Kinect (Microsoft),<break/>380 cm webcam</td><td align="left" valign="top">OpenPose, Temporal Conv1D<sup><xref ref-type="table-fn" rid="table1fn6">f</xref></sup>, BiLSTM<sup><xref ref-type="table-fn" rid="table1fn7">g</xref></sup></td><td align="left" valign="top">Kinect + webcam</td><td align="left" valign="top">ACC=85%</td><td align="left" valign="top">Custom dataset of n=52</td></tr><tr><td align="left" valign="top">Chariar et al [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">Squat</td><td align="left" valign="top">Frontal</td><td align="left" valign="top">Distance N/R; 120 cm height</td><td align="left" valign="top">MediaPipe (Google), Bi-GRU<sup><xref 
ref-type="table-fn" rid="table1fn8">h</xref></sup></td><td align="left" valign="top">2 depth cameras</td><td align="left" valign="top">ACC=94%</td><td align="left" valign="top">Custom dataset of n~50</td></tr><tr><td align="left" valign="top">Zhang et al [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">Push-up</td><td align="left" valign="top">Frontal, side</td><td align="left" valign="top">N/R</td><td align="left" valign="top">MoveNet; angle-heuristic; Optical flow</td><td align="left" valign="top">N/R</td><td align="left" valign="top">Average <italic>F</italic><sub>1</sub>-score: angle-heuristic=0.85 (side &#x003E; front); pose classification=0.94 (side &#x003E; front); optical flow=0.79 (front &#x003E; side)</td><td align="left" valign="top">Kaggle &#x201C;Push-up Exercise&#x201D; dataset</td></tr><tr><td align="left" valign="top">Japhne et al [<xref ref-type="bibr" rid="ref24">24</xref>]</td><td align="left" valign="top">Push-up, squat, and others</td><td align="left" valign="top">N/R, full body visible</td><td align="left" valign="top">200 cm</td><td align="left" valign="top">OpenPose, LSTM<sup><xref ref-type="table-fn" rid="table1fn9">i</xref></sup></td><td align="left" valign="top">Mobile devices</td><td align="left" valign="top">Push-up: ACC=~99%;<break/>Squat: ACC=~99%</td><td align="left" valign="top">Custom dataset of n=3</td></tr><tr><td align="left" valign="top">Mercadal-Baudart et al [<xref ref-type="bibr" rid="ref21">21</xref>]</td><td align="left" valign="top">Squat and others</td><td align="left" valign="top">Frontal; multiangle validation</td><td align="left" valign="top">~300 cm radius; 150 cm height</td><td align="left" valign="top">Detectron2 (Meta Platforms, Inc), Strided Transformer</td><td align="left" valign="top">Mobile devices</td><td align="left" valign="top">RMSE<sup><xref ref-type="table-fn" rid="table1fn10">j</xref></sup> of joint angles versus VICON<sup><xref ref-type="table-fn" 
rid="table1fn11">k</xref></sup> (Vicon Motion Systems): &#x003C;10&#x00B0; for most joints (shin, knee, hip, trunk, and spine), &#x003C;15&#x00B0; for shoulder and ASIS<sup><xref ref-type="table-fn" rid="table1fn12">l</xref></sup> (notably front squats)</td><td align="left" valign="top">Custom dataset of n=8&#x2010;12</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>PE: pose estimation.</p></fn><fn id="table1fn2"><p><sup>b</sup>N/R: not reported.</p></fn><fn id="table1fn3"><p><sup>c</sup>ACC: accuracy.</p></fn><fn id="table1fn4"><p><sup>d</sup>EJUST-SQUAT-21: Egypt-Japan University squat dataset 2021.</p></fn><fn id="table1fn5"><p><sup>e</sup>MM-Fit: multimodal fitness dataset.</p></fn><fn id="table1fn6"><p><sup>f</sup>Conv1D: one-dimensional convolutional neural network.</p></fn><fn id="table1fn7"><p><sup>g</sup>BiLSTM: bidirectional long short-term memory.</p></fn><fn id="table1fn8"><p><sup>h</sup>Bi-GRU: bidirectional gated recurrent unit.</p></fn><fn id="table1fn9"><p><sup>i</sup>LSTM: long short-term memory.</p></fn><fn id="table1fn10"><p><sup>j</sup>RMSE: root mean square error.</p></fn><fn id="table1fn11"><p><sup>k</sup>VICON: Vicon motion capture system.</p></fn><fn id="table1fn12"><p><sup>l</sup>ASIS: anterior superior iliac spine.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s2-7"><title>Pose Estimation and Processing</title><p>Video data were processed using a multistage PE and repetition detection pipeline designed for real-world deployment on the Dotmoovs mobile platform [<xref ref-type="bibr" rid="ref37">37</xref>]. The pipeline was optimized for low-latency, on-device inference while maintaining sufficient accuracy for exercise recognition.</p><sec id="s2-7-1"><title>Model Selection</title><p>A lightweight 2D PE model was prioritized to ensure real-time performance on consumer smartphones and without requiring external sensors. 
Among candidate architectures (eg, PoseNet [Google], BlazePose, and MoveNet), MoveNet was selected due to its computational efficiency, suitability for edge inference, and enhanced generalization to fitness-related movements. MoveNet was trained on the COCO (Common Objects in Context) dataset (Microsoft) and Google&#x2019;s internal Active dataset, which includes annotated yoga, fitness, and dance poses exhibiting substantial motion variability [<xref ref-type="bibr" rid="ref38">38</xref>-<xref ref-type="bibr" rid="ref40">40</xref>]. For cloud-based processing, EvoPose2D (Huawei Technologies Co, Ltd) and ViTPose (Microsoft Research Asia) were implemented to achieve higher precision at the cost of increased computational load. This hybrid configuration balances latency, accuracy, and hardware constraints, supporting both on-device responsiveness and scalable cloud inference [<xref ref-type="bibr" rid="ref39">39</xref>]. The models were trained on approximately 1 million samples, reserving 10,000 samples each for validation and testing, ensuring robustness and generalizability across diverse movement types.</p></sec><sec id="s2-7-2"><title>Training Datasets and Domain Adaptation</title><p>To improve robustness for fitness-specific postures, the training pipeline incorporated multiple publicly available datasets: SMART (Sports Motion and Recognition Tasks) [<xref ref-type="bibr" rid="ref41">41</xref>], LSP (Leeds Sports Pose) Extended [<xref ref-type="bibr" rid="ref42">42</xref>], Penn Action [<xref ref-type="bibr" rid="ref43">43</xref>], and MPII Human Pose [<xref ref-type="bibr" rid="ref44">44</xref>]. Despite these datasets, complex movements, particularly floor-based exercises (eg, push-ups), remained challenging for accurate PE. This motivated the creation of DotPose, a custom internal dataset designed to complement existing datasets and improve the detection of occluded limbs and challenging postures. 
The combination of DotPose with public datasets further mitigates potential biases related to body types, cultural differences, exercise contexts, and environmental scenarios, thereby enhancing generalizability across diverse users and real-world conditions.</p></sec><sec id="s2-7-3"><title>Pipeline Implementation</title><p>The PE outputs (17 keypoints per frame) were processed through a proprietary deep learning module performing two primary tasks:</p><list list-type="order"><list-item><p>State classification: A lightweight neural network classifies each frame into discrete movement states (eg, squat-down, squat-up, or other).</p></list-item><list-item><p>Repetition counting: A Markov-chain-based algorithm tracks temporal transitions between states, triggering a repetition event when a predefined sequence (eg, up-down-up) is detected.</p></list-item></list><p>This architecture ensures temporal stability and robustness against intermittent keypoint noise. The pipeline was benchmarked for real-time inference, with MoveNet demonstrating subsecond latency, suitable for interactive mobile feedback. Finally, all pose outputs were exported in comma-separated values format to enable statistical analysis of accuracy and classification metrics across varying camera angles and distances.</p></sec></sec><sec id="s2-8"><title>Manual Annotation</title><p>All video samples were independently annotated by 2 trained raters (an exercise physiologist and a physical therapist), who followed a predefined schema based on standardized exercise movement criteria. Each annotator labeled the exercise type and manually counted the number of valid repetitions per trial. No discrepancies were observed between raters. 
These annotations were then used to evaluate the accuracy of detection and repetition counting.</p></sec><sec id="s2-9"><title>Statistical Analysis</title><p>All analyses were conducted in R (version 4.5.0; R Foundation for Statistical Computing) using established packages including lme4, lmerTest, emmeans, and performance. Performance metrics were reported in alignment with prior benchmark studies in human activity recognition and AI-based motion analysis [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. The primary outcome was a binary accuracy indicator, coded as 1 when the system&#x2019;s predicted repetition count exactly matched the ground truth, and as 0 otherwise. This was modeled using generalized linear mixed-effects models (GLMMs) with a logit link. The secondary outcome was the mean absolute error (MAE), defined as the absolute difference between predicted and ground-truth repetition counts per video, modeled using linear mixed effects models (LMMs) with a Gaussian distribution. For descriptive reporting purposes, we also calculated a detection rate for each video, defined as (prediction/ground truth) &#x00D7; 100, representing the percentage of repetitions counted correctly. Both models included camera angle and distance as fixed effects, with participant ID modeled as a random intercept to account for intraindividual variability and repeated measures. When the fixed effects were statistically significant (<italic>P</italic>&#x003C;.05), Tukey-adjusted post hoc contrasts were conducted to compare condition pairs. Odds ratios (ORs) were computed by exponentiating the log odds from the GLMM to facilitate interpretation.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Participant Characteristics</title><p>Our dataset comprised 44 healthy university students from the University of &#x00C9;vora, Portugal, including 9 (20.5%) females and 35 (79.5%) males. 
The participants had a mean age of 20.31 (SD 0.40) years, a mean height of 1.74 (SD 0.09) meters, and a mean body mass of 70 kg (SD 14.72), resulting in a mean BMI of 23.16 kg/m&#x00B2; (SD 0.61).</p><p>Based on previous studies assessing AI-based PE for push-up and squat detection [<xref ref-type="bibr" rid="ref24">24</xref>], we assumed a medium-to-large expected effect size (Cohen <italic>d</italic>=0.65) for power estimation. A post hoc analysis conducted in G*Power 3.1 (Heinrich Heine University D&#x00FC;sseldorf) indicated that the current sample size (n=44) provides approximately 0.74 statistical power to detect this effect at a 2-tailed significance level of <italic>&#x03B1;</italic>=.05, suggesting adequate sensitivity for detecting meaningful differences in PE performance across camera conditions.</p></sec><sec id="s3-2"><title>Push-Up Performance</title><p><xref ref-type="table" rid="table2">Table 2</xref> shows detection rates and MAEs for push-ups across camera angles and distances. Overall, the mean detection rate was 61.1% (SD 48.8%), with an average MAE of 1.08 (SD 1.78) repetitions. Detection rates peaked for diagonal views at 90 cm and 180 cm (both 85.7%, SD 35.9%), as illustrated in <xref ref-type="fig" rid="figure1">Figure 1</xref>. The corresponding MAEs at these distances were low, at 0.29 (SD 0.90) repetitions. 
In contrast, the front view at 360 cm showed the lowest detection rate of 20% (SD 41.0%) and the highest MAE of 2.70 repetitions (SD 2.05).</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Detection rate of push-ups across distances and angles.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mhealth_v14i1e82412_fig01.png"/></fig><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Detection rate and MAE<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> for push-ups across camera angles and distances.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Camera angle</td><td align="left" valign="bottom">Distance (cm)</td><td align="left" valign="bottom">Detection rate (%), mean (SD)</td><td align="left" valign="bottom">MAE, mean (SD)</td></tr></thead><tbody><tr><td align="left" valign="top">Side</td><td align="left" valign="top">90</td><td align="left" valign="top">68.4 (47.8)</td><td align="left" valign="top">0.842 (1.537)</td></tr><tr><td align="left" valign="top">Side</td><td align="left" valign="top">180</td><td align="left" valign="top">77.3 (42.9)</td><td align="left" valign="top">0.409 (0.796)</td></tr><tr><td align="left" valign="top">Side</td><td align="left" valign="top">200</td><td align="left" valign="top">66.7 (48.3)</td><td align="left" valign="top">1.095 (1.972)</td></tr><tr><td align="left" valign="top">Side</td><td align="left" valign="top">360</td><td align="left" valign="top">73.7 (45.2)</td><td align="left" valign="top">0.789 (1.653)</td></tr><tr><td align="left" valign="top">Diagonal</td><td align="left" valign="top">90</td><td align="left" valign="top">85.7 (35.9)</td><td align="left" valign="top">0.286 (0.902)</td></tr><tr><td align="left" valign="top">Diagonal</td><td align="left" valign="top">180</td><td align="left" valign="top">85.7 (35.9)</td><td align="left" valign="top">0.286 
(0.902)</td></tr><tr><td align="left" valign="top">Diagonal</td><td align="left" valign="top">200</td><td align="left" valign="top">81.8 (39.5)</td><td align="left" valign="top">0.227 (0.528)</td></tr><tr><td align="left" valign="top">Diagonal</td><td align="left" valign="top">360</td><td align="left" valign="top">55.0 (51)</td><td align="left" valign="top">1.500 (2.482)</td></tr><tr><td align="left" valign="top">Front</td><td align="left" valign="top">90</td><td align="left" valign="top">36.8 (49.6)</td><td align="left" valign="top">1.684 (1.974)</td></tr><tr><td align="left" valign="top">Front</td><td align="left" valign="top">180</td><td align="left" valign="top">26.7 (45.8)</td><td align="left" valign="top">2.067 (2.017)</td></tr><tr><td align="left" valign="top">Front</td><td align="left" valign="top">200</td><td align="left" valign="top">33.3 (48.8)</td><td align="left" valign="top">1.733 (1.981)</td></tr><tr><td align="left" valign="top">Front</td><td align="left" valign="top">360</td><td align="left" valign="top">20 (41)</td><td align="left" valign="top">2.700 (2.055)</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>MAE: mean absolute error.</p></fn></table-wrap-foot></table-wrap><p>The GLMM (Table S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) revealed a significant main effect of camera angle on accuracy. Specifically, front views had significantly lower odds of detection (OR=0.17, <italic>P</italic>=.03). However, no significant effects of distance alone were found. Conversely, a statistically significant interaction was found between the diagonal view and the 360 cm distance, which reduced the ability to correctly identify push-ups (<italic>P</italic>=.04).</p><p>Post hoc pairwise contrasts were performed to examine differences between the specific camera angles and distances (Table S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). 
These contrasts confirmed that diagonal camera views at 90 cm significantly outperformed frontal views at all distances, including 90 cm (estimate=3.41, <italic>P</italic>=.019), 180 cm (estimate=3.67, <italic>P</italic>=.02), 200 cm (estimate=3.52, <italic>P</italic>=.02), and 360 cm (estimate=4.40, <italic>P</italic>&#x003C;.001). There were also notable differences in the multiple mid-range diagonal and frontal configurations: the diagonal 180 cm view outperformed the frontal 180 cm view (estimate=3.64, <italic>P</italic>=.02), 200 cm (estimate=3.50, <italic>P</italic>=.03), as well as 360 cm (estimate=4.35, <italic>P</italic>=.002).</p><p>Likewise, the diagonal 200 cm view surpassed the frontal 360 cm view (estimate=3.96, <italic>P</italic>=.003). Additionally, the side views at 180 cm and 360 cm were significantly better than the frontal 360 cm view (estimate=3.586, <italic>P</italic>=.008, and estimate=3.174, <italic>P</italic>=.03, respectively). These results demonstrate that diagonal and mid-range frontal camera placements substantially increase push-up detection accuracy compared to frontal or side views at extreme distances.</p><p>Analyses of push-up repetition MAE using LMM were conducted to evaluate the error comparisons in the respective configurations. Although the main effects of angle and distance were not individually significant beyond the intercept (Table S3 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>), Tukey&#x2019;s post hoc tests (Table S4 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) revealed meaningful differences between specific camera setups. Diagonal angles at mid-range distances consistently reduced counting errors compared with frontal and side angles at extreme distances. Diagonal views at 90 cm and 180 cm, for example, significantly outperformed frontal views at 180 cm and 360 cm (estimates=&#x2212;1.686 to &#x2212;2.395, <italic>P</italic>=.04). 
Diagonal views at 200 cm were also superior to frontal views at 360 cm (estimate=&#x2212;2.428, <italic>P</italic>&#x003C;.001). Side views at extreme distances exhibited higher MAE than frontal views at 360 cm (estimate=&#x2212;1.800, <italic>P</italic>&#x003C;.01). Overall, these results suggest that diagonal positioning at mid-range distances (approximately 180&#x2010;200 cm) minimizes counting errors in push-ups.</p></sec><sec id="s3-3"><title>Squat Performance</title><p><xref ref-type="table" rid="table3">Table 3</xref> shows detection rates and MAEs for squats across camera angles and distances. Overall, the mean detection rate was 61.5% (SD 48.7%), with an average MAE of 1.11 (SD 1.82) repetitions. Detection rates peaked for the diagonal view at 200 cm (95.5%, SD 21.3%), as illustrated in <xref ref-type="fig" rid="figure2">Figure 2</xref>. The corresponding MAE at this distance was minimal, at 0.05 (SD 0.21) repetitions. In contrast, the side view at 90 cm showed the lowest detection rate of 0% (SD 0%) as well as the highest MAE of 5 (SD 0) repetitions.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Detection rate of squats across distances and angles.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mhealth_v14i1e82412_fig02.png"/></fig><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Detection rate and MAE<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> for squats across camera angles and distances.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Camera angle</td><td align="left" valign="bottom">Distance (cm)</td><td align="left" valign="bottom">Detection rate (%), mean (SD)</td><td align="left" valign="bottom">MAE, mean (SD)</td></tr></thead><tbody><tr><td align="left" valign="top">Side</td><td align="left" valign="top">90</td><td align="left" valign="top">0 (0)</td><td 
align="left" valign="top">5 (0)</td></tr><tr><td align="left" valign="top">Side</td><td align="left" valign="top">180</td><td align="left" valign="top">28.6 (46.3)</td><td align="left" valign="top">0.714 (0.463)</td></tr><tr><td align="left" valign="top">Side</td><td align="left" valign="top">200</td><td align="left" valign="top">36.4 (49.2)</td><td align="left" valign="top">0.636 (0.492)</td></tr><tr><td align="left" valign="top">Side</td><td align="left" valign="top">360</td><td align="left" valign="top">23.8 (43.6)</td><td align="left" valign="top">3.095 (2.3)</td></tr><tr><td align="left" valign="top">Diagonal</td><td align="left" valign="top">90</td><td align="left" valign="top">45.5 (51)</td><td align="left" valign="top">1.318 (1.673)</td></tr><tr><td align="left" valign="top">Diagonal</td><td align="left" valign="top">180</td><td align="left" valign="top">90.9 (29.4)</td><td align="left" valign="top">0.091 (0.294)</td></tr><tr><td align="left" valign="top">Diagonal</td><td align="left" valign="top">200</td><td align="left" valign="top">95.5 (21.3)</td><td align="left" valign="top">0.045 (0.213)</td></tr><tr><td align="left" valign="top">Diagonal</td><td align="left" valign="top">360</td><td align="left" valign="top">90.9 (29.4)</td><td align="left" valign="top">0.091 (0.294)</td></tr><tr><td align="left" valign="top">Front</td><td align="left" valign="top">90</td><td align="left" valign="top">86.4 (35.1)</td><td align="left" valign="top">0.318 (0.945)</td></tr><tr><td align="left" valign="top">Front</td><td align="left" valign="top">180</td><td align="left" valign="top">81 (40.2)</td><td align="left" valign="top">0.571 (1.248)</td></tr><tr><td align="left" valign="top">Front</td><td align="left" valign="top">200</td><td align="left" valign="top">90 (30.8)</td><td align="left" valign="top">0.4 (1.273)</td></tr><tr><td align="left" valign="top">Front</td><td align="left" valign="top">360</td><td align="left" valign="top">70 (47)</td><td align="left" 
valign="top">1.1 (1.889)</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>MAE: mean absolute error.</p></fn></table-wrap-foot></table-wrap><p>Analyzing the binary accuracy through the GLMM (Table S5 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) demonstrated that the diagonal and frontal angles significantly outperformed the side angle at 90 cm (OR=20.43, <italic>P</italic>&#x003C;.001, and OR=23.01, <italic>P</italic>&#x003C;.001, respectively). Detection also significantly improved at mid-range distances of 180 cm (OR=4.36, <italic>P</italic>=.002), 200 cm (OR=7.62, <italic>P</italic>&#x003C;.001), and 360 cm (OR=3.054, <italic>P</italic>=.01) compared to 90 cm. Post hoc pairwise comparisons (Table S6 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) revealed that the diagonal view at 180 cm significantly outperformed the front view at 90 cm (estimate=2.810, <italic>P</italic>=.02) and the front view at 180 cm (estimate=3.058, <italic>P</italic>=.009). However, it underperformed compared to the front view at 200 cm (estimate=&#x2212;3.271, <italic>P</italic>=.01) and the front view at 360 cm (estimate=&#x2212;4.016, <italic>P</italic>=.02). Side views at 180 cm and 360 cm outperformed the front view at 360 cm in correctly detecting squats (estimates=2.907 to 3.239, both <italic>P</italic>=.04). The diagonal view at 200 cm had significantly better detection accuracy than the front view at 360 cm (estimate=3.174, <italic>P</italic>=.03). 
These findings suggest that diagonal mid-range placements generally perform well, but that frontal views at longer distances (200 and 360 cm) can sometimes outperform the diagonal view at 180 cm for squat detection.</p><p>For squats, LMM analysis of MAE (Table S7 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) revealed significantly lower errors for diagonal and frontal angles compared with the side (estimates were &#x2212;3.682 and &#x2212;4.682, respectively; both <italic>P</italic>&#x003C;.001). All tested distances (180 cm, 200 cm, and 360 cm) were superior to the 90 cm reference (estimates ranged from &#x2212;4.364 to &#x2212;1.910; <italic>P</italic> values&#x003C;.001). Several significant interactions were observed. For instance, combining a diagonal angle with 180 cm or 200 cm decreased MAE by 3.06 and 3.09 repetitions, respectively (both <italic>P</italic>&#x003C;.001), which moderated the improvements to the main effects expected from angle and distance independently. The front &#x00D7; 180 cm interaction (estimate=4.547, <italic>P</italic>&#x003C;.001) and the front &#x00D7; 200 cm interaction (estimate=4.460, <italic>P</italic>&#x003C;.001) exhibited similar patterns, indicating smaller improvements than the sum of the main effects predicted. In the model, only the diagonal &#x00D7; 360 cm interaction was nonsignificant (<italic>P</italic>=.16). Several significant post hoc pairwise comparisons were identified (Table S8 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). These confirmed that diagonal and frontal mid-range placements consistently minimized error. Diagonal at 200 cm produced the largest reduction (estimate=&#x2212;4.96 repetitions, <italic>P</italic>&#x003C;.001), similar to diagonal at 180 cm (estimate=&#x2212;4.91 repetitions, <italic>P</italic>&#x003C;.001). 
Most configurations substantially reduced MAE compared to the 90 cm side, but the magnitude of improvement depended on the specific combination of angle and distance. This highlights notable interaction effects.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Main Findings</title><p>This study demonstrates that smartphone camera angle and distance critically affect the accuracy of PE models for detecting and counting push-ups and squats. Consistent with our results, camera placements at intermediate distances (180&#x2010;200 cm) combined with oblique (diagonal) or frontal views generally yielded the highest detection rates and lowest counting errors. In contrast, very close-range setups (90 cm) and long-distance frontal views (360 cm) often showed reduced performance. For push-ups, diagonal views between 90 and 200 cm outperformed frontal angles, with the diagonal view at 200 cm position achieving the lowest MAE. For squats, diagonal and frontal views at 180 to 200 cm significantly outperformed side views, with diagonal 180 cm, diagonal 200 cm, and front 200 cm producing the smallest MAEs and the largest error reductions in post hoc tests. Although front 360 cm occasionally approached the accuracy of diagonal 180 cm, most mid-range configurations substantially outperformed both close-range and long-distance side views. These findings provide direct evidence that mid-range diagonal and frontal camera configurations optimize PE performance, informing best practices for smartphone-based exercise monitoring.</p></sec><sec id="s4-2"><title>Comparison to Prior Work</title><p>Unlike prior lab-based, multicamera studies [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref31">31</xref>], our study provides practical, configuration-specific guidance for monocular smartphone setups. 
This approach enhances ecological validity by closely mimicking typical home or gym environments where single-camera devices are commonly used. While [<xref ref-type="bibr" rid="ref17">17</xref>] demonstrated that full-body visibility at 300 cm supports accurate gait tracking, we show that diagonal views at 180 to 200 cm consistently optimize visibility and accuracy for dynamic strength exercises. Views at 360 cm, however, yield variable and often low detection rates (push-ups: 20%&#x2010;73.7%; squats: 23%&#x2010;90.9%), depending on the angle. A previous study [<xref ref-type="bibr" rid="ref46">46</xref>] evaluated smartphone-based distance estimation between 100 and 300 cm, examining the trade-offs between spatial accuracy and usability. Our findings confirm that mid-range diagonal placements are optimal for PE performance, likely due to differences in exercise posture (horizontal for push-ups vs upright for squats).</p><p>Prior studies have evidenced smartphone apps using TensorFlow Lite and COCO-trained models for exercise counting [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref47">47</xref>], but have not comprehensively examined spatial configuration effects. Our results address this gap, complementing [<xref ref-type="bibr" rid="ref48">48</xref>], which emphasized viewpoint in PE data acquisition; [<xref ref-type="bibr" rid="ref9">9</xref>], which advocated for accessible PE tools beyond labs; and [<xref ref-type="bibr" rid="ref12">12</xref>], which discussed challenges of motion tracking in naturalistic settings. By offering concrete camera setup recommendations, this work contributes actionable insights for mHealth, home fitness, telerehabilitation, sports performance, and clinical decision-making contexts.</p></sec><sec id="s4-3"><title>Practical Implications for mHealth</title><p>Positioning the smartphone camera diagonally (~45&#x00B0;) at 180 to 200 cm significantly enhances PE accuracy without additional hardware. 
Mobile apps can integrate augmented reality or setup guides to assist users in achieving optimal device placement. Accurate repetition counting supports load monitoring, fatigue tracking, and muscular endurance assessment in unsupervised environments [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Combining spatial optimization with adaptive feedback and personalized experiences may further improve tracking reliability and user engagement, consistent with evidence from behavior change and human-computer interaction research [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. This integrated approach presents a promising path for scalable, user-friendly mHealth exercise platforms.</p></sec><sec id="s4-4"><title>Limitations</title><p>This study has several limitations. First, PE performance was evaluated using manual annotations rather than a gold-standard motion capture system, which may introduce variability. Second, the sample comprised healthy young adults in a controlled laboratory environment, limiting generalizability to clinical populations, older adults, or real-world contexts with variable lighting and backgrounds. Third, while only 1 PE pipeline was tested, integrating multiple public datasets and the custom DotPose dataset helped mitigate biases related to scene composition, body type, exercise variety, and environmental scenarios. Furthermore, comparisons with other open-source PE models (eg, OpenPose, HRNet [Microsoft Research Asia], and BlazePose) are limited by variations in datasets, computational demands, and architectural design [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. Finally, lightweight 2D models, such as MoveNet, enable near-real-time, on-device inference. 
These models balance speed, portability, and user engagement with the fine-grained accuracy achievable by heavier, cloud-reliant models [<xref ref-type="bibr" rid="ref13">13</xref>]. This hybrid mobile-cloud configuration supports the robust evaluation of push-ups and squats across multiple camera setups. Nevertheless, findings may not generalize to other PE architectures, populations, or unstructured environments.</p></sec><sec id="s4-5"><title>Future Directions</title><p>Future work should explore multimodal pipelines, diverse participant groups, and variable environmental conditions (eg, low-light and high-contrast conditions) to enhance robustness, applicability, and real-world relevance of mobile PE systems. While repetition counting is a fundamental first step, future mHealth systems should also evaluate movement quality. This should encompass compensatory strategies and fatigue-related adaptations [<xref ref-type="bibr" rid="ref54">54</xref>-<xref ref-type="bibr" rid="ref56">56</xref>]. Emerging real-time PE technologies that optimize sensor or camera placement, combined with adaptive feedback responsive to user performance, offer opportunities to enhance movement accuracy and user experience in unsupervised settings [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref58">58</xref>]. Longitudinal studies are needed to assess the integration of spatial setup guidance, personalization, and real-time feedback for improved usability, engagement, and long-term clinical or fitness outcomes.</p></sec><sec id="s4-6"><title>Conclusions</title><p>Camera angle and distance significantly affect the accuracy of PE systems for exercise detection. For optimal performance, smartphone cameras should be positioned at mid-range distances (180&#x2010;200 cm) with diagonal or frontal views. 
For push-ups, diagonal views are preferred, while for squats, both diagonal and frontal views perform well. Conversely, close-range setups (90 cm) and long-distance frontal views (360 cm) substantially reduce detection and counting accuracy. These findings provide actionable guidance for developing scalable, accurate, and user-friendly mHealth exercise tracking platforms.</p></sec></sec></body><back><ack><p>The authors would like to thank the students who participated in this study for their time and commitment. They would also like to acknowledge the University of &#x00C9;vora for its support in facilitating the research. During the preparation of the manuscript, ChatGPT was used to suggest grammatical edits. The authors reviewed and edited all content and took full responsibility for the final version.</p></ack><notes><sec><title>Funding</title><p>This work was financially supported by Project Blockchain.PT&#x2014;Decentralize Portugal with Blockchain Agenda (Project 51), WP4 Sports, Leisure, and Culture, Call number 02/C05-i01.01/2022, funded by the Portuguese Recovery and Resilience Program, the Portuguese Republic, and the European Union under the framework of the Next Generation European Union Program. The funders had no involvement in the study design, data collection, analysis, interpretation of results, or the writing of the manuscript.</p></sec><sec><title>Data Availability</title><p>The study leveraged established benchmark datasets for human pose estimation, including COCO (Common Objects in Context) [<xref ref-type="bibr" rid="ref38">38</xref>], SMART (Sports Motion and Recognition Tasks) [<xref ref-type="bibr" rid="ref41">41</xref>], LSP (Leeds Sports Pose) Extended [<xref ref-type="bibr" rid="ref42">42</xref>], Penn Action [<xref ref-type="bibr" rid="ref43">43</xref>], and MPII Human Pose [<xref ref-type="bibr" rid="ref44">44</xref>]. 
Additionally, the data collected from the 44 participants are available from the corresponding author upon reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: EO, SF, APG</p><p>Data curation: EO, GV</p><p>Formal analysis: EO, FG-F</p><p>Investigation: EO, SF, JP, PP</p><p>Methodology: EO, GV</p><p>Project administration: SA</p><p>Supervision: APG</p><p>Writing &#x2013; original draft: EO</p><p>Writing &#x2013; review &#x0026; editing: SF, APG, GV, JP, PP, FG-F, SA</p><p>All authors have reviewed and approved the final manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">COCO</term><def><p>Common Objects in Context</p></def></def-item><def-item><term id="abb3">GLMM</term><def><p>generalized linear mixed model</p></def></def-item><def-item><term id="abb4">LMM</term><def><p>linear mixed effects model</p></def></def-item><def-item><term id="abb5">LSP</term><def><p>Leeds Sports Pose</p></def></def-item><def-item><term id="abb6">MAE</term><def><p>mean absolute error</p></def></def-item><def-item><term id="abb7">mHealth</term><def><p>mobile health</p></def></def-item><def-item><term id="abb8">OR</term><def><p>odds ratio</p></def></def-item><def-item><term id="abb9">PE</term><def><p>pose estimation</p></def></def-item><def-item><term id="abb10">SMART</term><def><p>Sports Motion and Recognition Tasks</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dallinga</surname><given-names>J</given-names> </name><name name-style="western"><surname>Janssen</surname><given-names>M</given-names> </name><name name-style="western"><surname>van der Werf</surname><given-names>J</given-names> 
</name><name name-style="western"><surname>Walravens</surname><given-names>R</given-names> </name><name name-style="western"><surname>Vos</surname><given-names>S</given-names> </name><name name-style="western"><surname>Deutekom</surname><given-names>M</given-names> </name></person-group><article-title>Analysis of the features important for the effectiveness of physical activity&#x2013;related apps for recreational sports: expert panel approach</article-title><source>JMIR mHealth uHealth</source><year>2018</year><month>06</month><day>18</day><volume>6</volume><issue>6</issue><fpage>e143</fpage><pub-id pub-id-type="doi">10.2196/mhealth.9459</pub-id><pub-id pub-id-type="medline">29914863</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kohl</surname><given-names>HW</given-names>  <suffix>3rd</suffix></name><name name-style="western"><surname>Craig</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Lambert</surname><given-names>EV</given-names> </name><etal/></person-group><article-title>The pandemic of physical inactivity: global action for public health</article-title><source>Lancet</source><year>2012</year><month>07</month><volume>380</volume><issue>9838</issue><fpage>294</fpage><lpage>305</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(12)60898-8</pub-id><pub-id pub-id-type="medline">23041199</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kardan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Jung</surname><given-names>A</given-names> </name><name name-style="western"><surname>Iqbal</surname><given-names>M</given-names> </name><name name-style="western"><surname>Keshtkar</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Geidl</surname><given-names>W</given-names> </name><name name-style="western"><surname>Pfeifer</surname><given-names>K</given-names> </name></person-group><article-title>Efficacy of digital interventions on physical activity promotion in individuals with noncommunicable diseases: an overview of systematic reviews</article-title><source>BMC Digit Health</source><year>2024</year><volume>2</volume><issue>1</issue><fpage>40</fpage><pub-id pub-id-type="doi">10.1186/s44247-024-00097-6</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="web"><article-title>Physical activity</article-title><source>World Health Organization</source><access-date>2026-02-10</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.who.int/news-room/fact-sheets/detail/physical-activity">https://www.who.int/news-room/fact-sheets/detail/physical-activity</ext-link></comment></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hallal</surname><given-names>PC</given-names> </name><name name-style="western"><surname>Andersen</surname><given-names>LB</given-names> </name><name name-style="western"><surname>Bull</surname><given-names>FC</given-names> </name><etal/></person-group><article-title>Global physical activity levels: surveillance progress, pitfalls, and prospects</article-title><source>Lancet</source><year>2012</year><month>07</month><day>21</day><volume>380</volume><issue>9838</issue><fpage>247</fpage><lpage>257</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(12)60646-1</pub-id><pub-id pub-id-type="medline">22818937</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Buecker</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Simacek</surname><given-names>T</given-names> </name><name name-style="western"><surname>Ingwersen</surname><given-names>B</given-names> </name><name name-style="western"><surname>Terwiel</surname><given-names>S</given-names> </name><name name-style="western"><surname>Simonsmeier</surname><given-names>BA</given-names> </name></person-group><article-title>Physical activity and subjective well-being in healthy individuals: a meta-analytic review</article-title><source>Health Psychol Rev</source><year>2021</year><month>12</month><volume>15</volume><issue>4</issue><fpage>574</fpage><lpage>592</lpage><pub-id pub-id-type="doi">10.1080/17437199.2020.1760728</pub-id><pub-id pub-id-type="medline">32452716</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>N&#x00FA;&#x00F1;ez-Cort&#x00E9;s</surname><given-names>R</given-names> </name><name name-style="western"><surname>Salazar-M&#x00E9;ndez</surname><given-names>J</given-names> </name><name name-style="western"><surname>Nijs</surname><given-names>J</given-names> </name></person-group><article-title>Physical activity as a central pillar of lifestyle modification in the management of chronic musculoskeletal pain: a narrative review</article-title><source>J Funct Morphol Kinesiol</source><year>2025</year><month>05</month><day>20</day><volume>10</volume><issue>2</issue><fpage>183</fpage><pub-id pub-id-type="doi">10.3390/jfmk10020183</pub-id><pub-id pub-id-type="medline">40407467</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Avogaro</surname><given-names>A</given-names> </name><name name-style="western"><surname>Cunico</surname><given-names>F</given-names> </name><name name-style="western"><surname>Rosenhahn</surname><given-names>B</given-names> 
</name><name name-style="western"><surname>Setti</surname><given-names>F</given-names> </name></person-group><article-title>Markerless human pose estimation for biomedical applications: a survey</article-title><source>Front Comput Sci</source><year>2023</year><volume>5</volume><fpage>1153160</fpage><pub-id pub-id-type="doi">10.3389/fcomp.2023.1153160</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stenum</surname><given-names>J</given-names> </name><name name-style="western"><surname>Cherry-Allen</surname><given-names>KM</given-names> </name><name name-style="western"><surname>Pyles</surname><given-names>CO</given-names> </name><name name-style="western"><surname>Reetzke</surname><given-names>RD</given-names> </name><name name-style="western"><surname>Vignos</surname><given-names>MF</given-names> </name><name name-style="western"><surname>Roemmich</surname><given-names>RT</given-names> </name></person-group><article-title>Applications of pose estimation in human health and performance across the lifespan</article-title><source>Sensors (Basel)</source><year>2021</year><month>11</month><day>3</day><volume>21</volume><issue>21</issue><fpage>7315</fpage><pub-id pub-id-type="doi">10.3390/s21217315</pub-id><pub-id pub-id-type="medline">34770620</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>El-Rajab</surname><given-names>I</given-names> </name><name name-style="western"><surname>Klotzbier</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Korbus</surname><given-names>H</given-names> </name><name name-style="western"><surname>Schott</surname><given-names>N</given-names> </name></person-group><article-title>Camera-based mobile applications for movement screening in healthy adults: a 
systematic review</article-title><source>Front Sports Act Living</source><year>2025</year><volume>7</volume><fpage>1531050</fpage><pub-id pub-id-type="doi">10.3389/fspor.2025.1531050</pub-id><pub-id pub-id-type="medline">40416048</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Luangaphirom</surname><given-names>T</given-names> </name><name name-style="western"><surname>Lueprasert</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kaewvichit</surname><given-names>P</given-names> </name><name name-style="western"><surname>Boonphotsiri</surname><given-names>S</given-names> </name><name name-style="western"><surname>Burapasikarin</surname><given-names>T</given-names> </name><name name-style="western"><surname>Siriborvornratanakul</surname><given-names>T</given-names> </name></person-group><article-title>Real-time weight training counting and correction using MediaPipe</article-title><source>Adv in Comp Int</source><year>2024</year><month>06</month><volume>4</volume><issue>2</issue><fpage>3</fpage><pub-id pub-id-type="doi">10.1007/s43674-024-00070-w</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Roggio</surname><given-names>F</given-names> </name><name name-style="western"><surname>Trovato</surname><given-names>B</given-names> </name><name name-style="western"><surname>Sortino</surname><given-names>M</given-names> </name><name name-style="western"><surname>Musumeci</surname><given-names>G</given-names> </name></person-group><article-title>A comprehensive analysis of the machine learning pose estimation models used in human movement and posture analyses: a narrative 
review</article-title><source>Heliyon</source><year>2024</year><volume>10</volume><issue>21</issue><fpage>e39977</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e39977</pub-id><pub-id pub-id-type="medline">39553598</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Phosanarack</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wallard</surname><given-names>L</given-names> </name><name name-style="western"><surname>Lepreux</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kolski</surname><given-names>C</given-names> </name><name name-style="western"><surname>Avril</surname><given-names>E</given-names> </name></person-group><article-title>Smartphone exergames with real-time markerless motion capture: challenges and trade-offs</article-title><source>arXiv</source><comment>Preprint posted online on  Jul 9, 2025</comment><pub-id pub-id-type="doi">10.48550/ARXIV.2507.06669</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Badiola-Bengoa</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mendez-Zorrilla</surname><given-names>A</given-names> </name></person-group><article-title>A systematic review of the application of camera&#x2011;based human pose estimation in the field of sport and physical exercise</article-title><source>Sensors (Basel)</source><year>2021</year><month>09</month><day>7</day><volume>21</volume><issue>18</issue><fpage>5996</fpage><pub-id pub-id-type="doi">10.3390/s21185996</pub-id><pub-id pub-id-type="medline">34577204</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Cornman</surname><given-names>HL</given-names> </name><name name-style="western"><surname>Stenum</surname><given-names>J</given-names> </name><name name-style="western"><surname>Roemmich</surname><given-names>RT</given-names> </name></person-group><article-title>Video-based quantification of human movement frequency using pose estimation: a pilot study</article-title><source>PLoS One</source><year>2021</year><volume>16</volume><issue>12</issue><fpage>e0261450</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0261450</pub-id><pub-id pub-id-type="medline">34929012</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Jeon</surname><given-names>H</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>D</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>J</given-names> </name></person-group><article-title>Human motion assessment on mobile devices</article-title><conf-name>2021 International Conference on Information and Communication Technology Convergence (ICTC)</conf-name><conf-date>Oct 20-22, 2021</conf-date><pub-id pub-id-type="doi">10.1109/ICTC52510.2021.9621114</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Park</surname><given-names>K</given-names> </name></person-group><article-title>Improving gait analysis techniques with markerless pose estimation based on smartphone location</article-title><source>Bioengineering (Basel)</source><year>2024</year><month>01</month><day>30</day><volume>11</volume><issue>2</issue><fpage>141</fpage><pub-id pub-id-type="doi">10.3390/bioengineering11020141</pub-id><pub-id 
pub-id-type="medline">38391625</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Liao</surname><given-names>K</given-names> </name><name name-style="western"><surname>Bishop</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bian</surname><given-names>C</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name></person-group><article-title>Validity of three commercial devices for recording movement velocity in the Bulgarian split squat</article-title><source>J Hum Kinet</source><year>2024</year><volume>95</volume><fpage>161</fpage><lpage>171</lpage><pub-id pub-id-type="doi">10.5114/jhk/189365</pub-id><pub-id pub-id-type="medline">39944971</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stamm</surname><given-names>O</given-names> </name><name name-style="western"><surname>Heimann-Steinert</surname><given-names>A</given-names> </name></person-group><article-title>Accuracy of monocular two&#x2011;dimensional pose estimation compared with a reference standard for kinematic multiview analysis: validation study</article-title><source>JMIR mHealth uHealth</source><year>2020</year><month>12</month><day>21</day><volume>8</volume><issue>12</issue><fpage>e19608</fpage><pub-id pub-id-type="doi">10.2196/19608</pub-id><pub-id pub-id-type="medline">33346739</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van den Hoorn</surname><given-names>W</given-names> </name><name name-style="western"><surname>Lavaill</surname><given-names>M</given-names> 
</name><name name-style="western"><surname>Cutbush</surname><given-names>K</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kerr</surname><given-names>G</given-names> </name></person-group><article-title>Comparison of shoulder range of motion quantified with mobile phone video&#x2011;based skeletal tracking and 3D motion capture-preliminary study</article-title><source>Sensors (Basel)</source><year>2024</year><month>01</month><day>15</day><volume>24</volume><issue>2</issue><fpage>534</fpage><pub-id pub-id-type="doi">10.3390/s24020534</pub-id><pub-id pub-id-type="medline">38257626</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mercadal-Baudart</surname><given-names>C</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Farrell</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Exercise quantification from single camera view markerless 3D pose estimation</article-title><source>Heliyon</source><year>2024</year><month>03</month><day>30</day><volume>10</volume><issue>6</issue><fpage>e27596</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e27596</pub-id><pub-id pub-id-type="medline">38510055</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ferreira</surname><given-names>B</given-names> </name><name name-style="western"><surname>Ferreira</surname><given-names>PM</given-names> </name><name name-style="western"><surname>Pinheiro</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Deep learning approaches for workout repetition counting and 
validation</article-title><source>Pattern Recognit Lett</source><year>2021</year><month>11</month><volume>151</volume><fpage>259</fpage><lpage>266</lpage><pub-id pub-id-type="doi">10.1016/j.patrec.2021.09.006</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rao</surname><given-names>P</given-names> </name><name name-style="western"><surname>Asha</surname><given-names>CS</given-names> </name><name name-style="western"><surname>Rao</surname><given-names>PR</given-names> </name></person-group><article-title>Real-time posture correction of squat exercise: a deep learning approach for performance analysis and error correction</article-title><source>IEEE Access</source><year>2025</year><volume>13</volume><fpage>39557</fpage><lpage>39571</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2025.3545207</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Japhne</surname><given-names>F</given-names> </name><name name-style="western"><surname>Janada</surname><given-names>K</given-names> </name><name name-style="western"><surname>Theodorus</surname><given-names>A</given-names> </name><name name-style="western"><surname>Chowanda</surname><given-names>A</given-names> </name></person-group><article-title>Fitcam: detecting and counting repetitive exercises with deep learning</article-title><source>J Big Data</source><year>2024</year><volume>11</volume><issue>1</issue><fpage>101</fpage><pub-id pub-id-type="doi">10.1186/s40537-024-00915-8</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Sinclair</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Kautai</surname><given-names>K</given-names> </name><name name-style="western"><surname>Shahamiri</surname><given-names>SR</given-names> </name></person-group><article-title>P&#x016B;ioio: on-device real-time smartphone-based automated exercise repetition counting system</article-title><source>arXiv</source><comment>Preprint posted online on  Jul 22, 2023</comment><pub-id pub-id-type="doi">10.48550/ARXIV.2308.02420</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fisal</surname><given-names>NR</given-names> </name><name name-style="western"><surname>Fathalla</surname><given-names>A</given-names> </name><name name-style="western"><surname>Elmanakhly</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Salah</surname><given-names>A</given-names> </name></person-group><article-title>Reported challenges in deep learning&#x2011;based human pose estimation: a systematic review</article-title><source>IEEE Access</source><year>2025</year><volume>13</volume><fpage>80520</fpage><lpage>80539</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2025.3567337</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fanton</surname><given-names>M</given-names> </name><name name-style="western"><surname>Harari</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Giffhorn</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Validation of Amazon Halo Movement: a smartphone camera-based assessment of movement health</article-title><source>NPJ Digit Med</source><year>2022</year><month>09</month><day>6</day><volume>5</volume><issue>1</issue><fpage>134</fpage><pub-id pub-id-type="doi">10.1038/s41746-022-00684-9</pub-id><pub-id 
pub-id-type="medline">36065060</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Biebl</surname><given-names>JT</given-names> </name><name name-style="western"><surname>Rykala</surname><given-names>M</given-names> </name><name name-style="western"><surname>Strobel</surname><given-names>M</given-names> </name><etal/></person-group><article-title>App&#x2011;based feedback for rehabilitation exercise correction in patients with knee or hip osteoarthritis: prospective cohort study</article-title><source>J Med Internet Res</source><year>2021</year><month>07</month><day>13</day><volume>23</volume><issue>7</issue><fpage>e26658</fpage><pub-id pub-id-type="doi">10.2196/26658</pub-id><pub-id pub-id-type="medline">34255677</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="book"><person-group person-group-type="author"><collab>American College of Sports Medicine</collab></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Liguori</surname><given-names>G</given-names> </name><name name-style="western"><surname>Feito</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Fountaine</surname><given-names>C</given-names> </name><name name-style="western"><surname>Roy</surname><given-names>BA</given-names> </name></person-group><source>ACSM&#x2019;s Guidelines for Exercise Testing and Prescription</source><year>2021</year><access-date>2026-02-10</access-date><edition>11</edition><publisher-name>Wolters Kluwer</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://books.google.co.in/books?id=yjibzQEACAAJ">https://books.google.co.in/books?id=yjibzQEACAAJ</ext-link></comment><pub-id pub-id-type="other">9781975150198</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="book"><person-group 
person-group-type="author"><name name-style="western"><surname>Miranda</surname><given-names>GHL</given-names> </name></person-group><source>Biofotogrametria Para Fisioterapeutas [Book in Portuguese]</source><year>2014</year><access-date>2026-02-10</access-date><edition>1</edition><publisher-name>Andreoli</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.indicalivros.com/livros/biofotogrametria-para-fisioterapeutas-geraldo-henrique-lopes-miranda">https://www.indicalivros.com/livros/biofotogrametria-para-fisioterapeutas-geraldo-henrique-lopes-miranda</ext-link></comment><pub-id pub-id-type="other">9788560416394</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Park</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Baek</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>JH</given-names> </name></person-group><article-title>Imagery based parametric classification of correct and incorrect motion for push-up counter using OpenPose</article-title><conf-name>2020 IEEE 16th International Conference on Automation Science and Engineering (CASE)</conf-name><conf-date>Aug 20-21, 2020</conf-date><pub-id pub-id-type="doi">10.1109/CASE48305.2020.9216833</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Youssef</surname><given-names>F</given-names> </name><name name-style="western"><surname>Zaky</surname><given-names>AB</given-names> </name><name name-style="western"><surname>Gomaa</surname><given-names>W</given-names> </name></person-group><article-title>Analysis of the squat exercise from visual data</article-title><conf-name>19th International Conference on Informatics in Control, Automation and Robotics 
(ICINCO 2022)</conf-name><conf-date>Jul 14-16, 2022</conf-date><pub-id pub-id-type="doi">10.5220/0011347900003271</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Hande</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kakirwar</surname><given-names>B</given-names> </name><name name-style="western"><surname>Bharadwaja</surname><given-names>AV</given-names> </name><name name-style="western"><surname>Kshirsagar</surname><given-names>P</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>A</given-names> </name><name name-style="western"><surname>Vijayakumar</surname><given-names>P</given-names> </name></person-group><article-title>Correction and estimation of workout postures with pose estimation using AI</article-title><conf-name>2023 International Conference on Intelligent and Innovative Technologies in Computing, Electrical and Electronics (IITCEE)</conf-name><conf-date>Jan 27-28, 2023</conf-date><pub-id pub-id-type="doi">10.1109/IITCEE57236.2023.10090463</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chae</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>JB</given-names> </name><name name-style="western"><surname>Park</surname><given-names>G</given-names> </name><name name-style="western"><surname>O&#x2019;Sullivan</surname><given-names>DM</given-names> </name><name name-style="western"><surname>Seo</surname><given-names>J</given-names> </name><name name-style="western"><surname>Park</surname><given-names>JJ</given-names> </name></person-group><article-title>An artificial intelligence exercise coaching mobile app: development and randomized controlled trial to verify its effectiveness in 
posture correction</article-title><source>Interact J Med Res</source><year>2023</year><month>09</month><day>12</day><volume>12</volume><fpage>e37604</fpage><pub-id pub-id-type="doi">10.2196/37604</pub-id><pub-id pub-id-type="medline">37698913</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chariar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rao</surname><given-names>S</given-names> </name><name name-style="western"><surname>Irani</surname><given-names>A</given-names> </name><name name-style="western"><surname>Suresh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Asha</surname><given-names>CS</given-names> </name></person-group><article-title>AI Trainer: autoencoder based approach for squat analysis and correction</article-title><source>IEEE Access</source><year>2023</year><volume>11</volume><fpage>107135</fpage><lpage>107149</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2023.3316009</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Han</surname><given-names>SZH</given-names> </name><name name-style="western"><surname>Lim</surname><given-names>KYT</given-names> </name></person-group><article-title>Designing and prototyping of AI-based real-time mobile detectors for calisthenic push-up exercise</article-title><conf-name>International Conference on Health and Social Care Information Systems and Technologies (HCist 2023)</conf-name><conf-date>Nov 8-10, 2023</conf-date><pub-id pub-id-type="doi">10.1016/j.procs.2024.06.192</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation 
citation-type="web"><source>Dotmoovs</source><access-date>2026-02-10</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.dotmoovs.com/">https://www.dotmoovs.com/</ext-link></comment></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>TY</given-names> </name><name name-style="western"><surname>Maire</surname><given-names>M</given-names> </name><name name-style="western"><surname>Belongie</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Microsoft COCO: common objects in context</article-title><conf-name>13th European Conference on Computer Vision (ECCV 2014)</conf-name><conf-date>Sep 6-12, 2014</conf-date><pub-id pub-id-type="doi">10.1007/978-3-319-10602-1_48</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Votel</surname><given-names>R</given-names> </name><name name-style="western"><surname>Li</surname><given-names>N</given-names> </name></person-group><article-title>Next-generation pose detection with MoveNet and TensorFlow.js</article-title><source>TensorFlow Blog</source><year>2021</year><month>05</month><day>17</day><access-date>2025-10-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://blog.tensorflow.org/2021/05/next-generation-pose-detection-with-movenet-and-tensorflowjs.html">https://blog.tensorflow.org/2021/05/next-generation-pose-detection-with-movenet-and-tensorflowjs.html</ext-link></comment></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kumar</surname><given-names>P</given-names> </name><name name-style="western"><surname>Chauhan</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Awasthi</surname><given-names>LK</given-names> </name></person-group><article-title>Human pose estimation using deep learning: review, methodologies, progress and future research directions</article-title><source>Int J Multimed Info Retr</source><year>2022</year><volume>11</volume><issue>4</issue><fpage>489</fpage><lpage>521</lpage><pub-id pub-id-type="doi">10.1007/s13735-022-00261-6</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>X</given-names> </name><name name-style="western"><surname>Pang</surname><given-names>A</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>W</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>J</given-names> </name></person-group><article-title>SportsCap: monocular 3D human motion capture and fine-grained understanding in challenging sports videos</article-title><source>Int J Comput Vis</source><year>2021</year><volume>129</volume><issue>10</issue><fpage>2846</fpage><lpage>2864</lpage><pub-id pub-id-type="doi">10.1007/s11263-021-01486-4</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Johnson</surname><given-names>S</given-names> </name><name name-style="western"><surname>Everingham</surname><given-names>M</given-names> </name></person-group><article-title>Learning effective human pose estimation from inaccurate annotation</article-title><conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name><conf-date>Jun 20-25, 2011</conf-date><pub-id 
pub-id-type="doi">10.1109/CVPR.2011.5995318</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>W</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Derpanis</surname><given-names>KG</given-names> </name></person-group><article-title>From actemes to action: a strongly-supervised representation for detailed action understanding</article-title><conf-name>Proceedings of the IEEE International Conference on Computer Vision (ICCV) 2013</conf-name><conf-date>Dec 1-8, 2013</conf-date><pub-id pub-id-type="doi">10.1109/ICCV.2013.280</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Andriluka</surname><given-names>M</given-names> </name><name name-style="western"><surname>Pishchulin</surname><given-names>L</given-names> </name><name name-style="western"><surname>Gehler</surname><given-names>P</given-names> </name><name name-style="western"><surname>Schiele</surname><given-names>B</given-names> </name></person-group><article-title>2D human pose estimation: new benchmark and state of the art analysis</article-title><conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name><conf-date>Jun 23-28, 2014</conf-date><pub-id pub-id-type="doi">10.1109/CVPR.2014.471</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dibenedetto</surname><given-names>G</given-names> </name><name name-style="western"><surname>Sotiropoulos</surname><given-names>S</given-names> </name><name name-style="western"><surname>Polignano</surname><given-names>M</given-names> 
</name><name name-style="western"><surname>Cavallo</surname><given-names>G</given-names> </name><name name-style="western"><surname>Lops</surname><given-names>P</given-names> </name></person-group><article-title>Comparing human pose estimation through deep learning approaches: an overview</article-title><source>Comput Vis Image Underst</source><year>2025</year><month>02</month><volume>252</volume><fpage>104297</fpage><pub-id pub-id-type="doi">10.1016/j.cviu.2025.104297</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hamilton-Fletcher</surname><given-names>G</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sheng</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Accuracy and usability of smartphone&#x2011;based distance estimation approaches for visual assistive technology development</article-title><source>IEEE Open J Eng Med Biol</source><year>2024</year><volume>5</volume><fpage>54</fpage><lpage>58</lpage><pub-id pub-id-type="doi">10.1109/OJEMB.2024.3358562</pub-id><pub-id pub-id-type="medline">38487094</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Jeon</surname><given-names>H</given-names> </name><name name-style="western"><surname>Yoon</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>D</given-names> </name></person-group><article-title>Lightweight 2D human pose estimation for fitness coaching system</article-title><conf-name>2021 36th International Technical Conference on Circuits/Systems, Computers and Communications (ITC-CSCC)</conf-name><conf-date>Jun 27-30, 2021</conf-date><pub-id 
pub-id-type="doi">10.1109/ITC-CSCC52171.2021.9501458</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Konak</surname><given-names>O</given-names> </name><name name-style="western"><surname>Wischmann</surname><given-names>A</given-names> </name><name name-style="western"><surname>van De Water</surname><given-names>R</given-names> </name><name name-style="western"><surname>Arnrich</surname><given-names>B</given-names> </name></person-group><article-title>A real-time human pose estimation approach for optimal sensor placement in sensor-based human activity recognition</article-title><conf-name>8th International Workshop on Sensor&#x2011;Based Activity Recognition and Artificial Intelligence (iWOAR 2023)</conf-name><conf-date>Sep 21-22, 2023</conf-date><pub-id pub-id-type="doi">10.1145/3615834.3615848</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ebben</surname><given-names>WP</given-names> </name><name name-style="western"><surname>Feldmann</surname><given-names>CR</given-names> </name><name name-style="western"><surname>Dayne</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Using squat testing to predict training loads for the deadlift, lunge, step-up, and leg extension exercises</article-title><source>J Strength Cond Res</source><year>2008</year><month>11</month><volume>22</volume><issue>6</issue><fpage>1947</fpage><lpage>1949</lpage><pub-id pub-id-type="doi">10.1519/JSC.0b013e31818747c9</pub-id><pub-id pub-id-type="medline">18978614</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>II</given-names> </name><name 
name-style="western"><surname>Chen</surname><given-names>YL</given-names> </name><name name-style="western"><surname>Chuang</surname><given-names>LL</given-names> </name></person-group><article-title>Test&#x2011;retest reliability of home&#x2011;based fitness assessments using a mobile app (R Plus Health) in healthy adults: prospective quantitative study</article-title><source>JMIR Form Res</source><year>2021</year><month>12</month><day>8</day><volume>5</volume><issue>12</issue><fpage>e28040</fpage><pub-id pub-id-type="doi">10.2196/28040</pub-id><pub-id pub-id-type="medline">34657835</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Baretta</surname><given-names>D</given-names> </name><name name-style="western"><surname>Perski</surname><given-names>O</given-names> </name><name name-style="western"><surname>Steca</surname><given-names>P</given-names> </name></person-group><article-title>Exploring users&#x2019; experiences of the uptake and adoption of physical activity apps: longitudinal qualitative study</article-title><source>JMIR Mhealth Uhealth</source><year>2019</year><month>02</month><day>8</day><volume>7</volume><issue>2</issue><fpage>e11636</fpage><pub-id pub-id-type="doi">10.2196/11636</pub-id><pub-id pub-id-type="medline">30735143</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name></person-group><article-title>Sports training strategies and interactive control methods based on neural network models</article-title><source>Comput Intell Neurosci</source><year>2022</year><volume>2022</volume><fpage>7624578</fpage><pub-id pub-id-type="doi">10.1155/2022/7624578</pub-id><pub-id
pub-id-type="medline">35295278</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Edriss</surname><given-names>S</given-names> </name><name name-style="western"><surname>Romagnoli</surname><given-names>C</given-names> </name><name name-style="western"><surname>Caprioli</surname><given-names>L</given-names> </name><name name-style="western"><surname>Bonaiuto</surname><given-names>V</given-names> </name><name name-style="western"><surname>Padua</surname><given-names>E</given-names> </name><name name-style="western"><surname>Annino</surname><given-names>G</given-names> </name></person-group><article-title>Commercial vision sensors and AI-based pose estimation frameworks for markerless motion analysis in sports and exercises: a mini review</article-title><source>Front Physiol</source><year>2025</year><volume>16</volume><fpage>1649330</fpage><pub-id pub-id-type="doi">10.3389/fphys.2025.1649330</pub-id><pub-id pub-id-type="medline">40873758</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liao</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Vakanski</surname><given-names>A</given-names> </name><name name-style="western"><surname>Xian</surname><given-names>M</given-names> </name></person-group><article-title>A deep learning framework for assessing physical rehabilitation exercises</article-title><source>IEEE Trans Neural Syst Rehabil Eng</source><year>2020</year><month>02</month><volume>28</volume><issue>2</issue><fpage>468</fpage><lpage>477</lpage><pub-id pub-id-type="doi">10.1109/TNSRE.2020.2966249</pub-id><pub-id pub-id-type="medline">31940544</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Bauer</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Kankaanp&#x00E4;&#x00E4;</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Meichtry</surname><given-names>A</given-names> </name><name name-style="western"><surname>Rissanen</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Suni</surname><given-names>JH</given-names> </name></person-group><article-title>Efficacy of six months neuromuscular exercise on lumbar movement variability - a randomized controlled trial</article-title><source>J Electromyogr Kinesiol</source><year>2019</year><month>10</month><volume>48</volume><fpage>84</fpage><lpage>93</lpage><pub-id pub-id-type="doi">10.1016/j.jelekin.2019.06.008</pub-id><pub-id pub-id-type="medline">31252284</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Woo</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Jeong</surname><given-names>H</given-names> </name></person-group><article-title>Exercise assessment based on human pose estimation and relative phase for real&#x2011;time remote exercise system</article-title><source>IEEE Access</source><year>2025</year><volume>13</volume><fpage>53203</fpage><lpage>53213</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2025.3551834</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tharatipyakul</surname><given-names>A</given-names> </name><name name-style="western"><surname>Srikaewsiew</surname><given-names>T</given-names> </name><name name-style="western"><surname>Pongnumkul</surname><given-names>S</given-names> </name></person-group><article-title>Deep learning-based human body pose estimation in providing feedback for 
physical movement: a review</article-title><source>Heliyon</source><year>2024</year><month>08</month><day>26</day><volume>10</volume><issue>17</issue><fpage>e36589</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e36589</pub-id><pub-id pub-id-type="medline">39281455</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Elsayed</surname><given-names>H</given-names> </name><name name-style="western"><surname>Hoffmann</surname><given-names>P</given-names> </name><name name-style="western"><surname>G&#x00FC;nther</surname><given-names>S</given-names> </name><etal/></person-group><article-title>CameraReady: assessing the influence of display types and visualizations on posture guidance</article-title><conf-name>DIS &#x2019;21: Proceedings of the 2021 ACM Designing Interactive Systems Conference</conf-name><conf-date>Jun 28 to Jul 2, 2021</conf-date><pub-id pub-id-type="doi">10.1145/3461778.3462026</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Detailed results of generalized linear mixed-effects models, linear mixed-effects models, and post-hoc pairwise comparisons for push-ups and squats.</p><media xlink:href="mhealth_v14i1e82412_app1.doc" xlink:title="DOC File, 6016 KB"/></supplementary-material></app-group></back></article>