<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article
  PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD with MathML3 v1.2 20190208//EN" "JATS-journalpublishing1-mathml3.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.2" xml:lang="en">
<front>
<journal-meta><journal-id journal-id-type="publisher-id">METH</journal-id><journal-id journal-id-type="nlm-ta">Methodology</journal-id>
<journal-title-group>
<journal-title>Methodology</journal-title><abbrev-journal-title abbrev-type="pubmed">Methodology</abbrev-journal-title>
</journal-title-group>
<issn pub-type="ppub">1614-1881</issn>
<issn pub-type="epub">1614-2241</issn>
<publisher><publisher-name>PsychOpen</publisher-name></publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">meth.17405</article-id>
<article-id pub-id-type="doi">10.5964/meth.17405</article-id>
<article-categories>
<subj-group subj-group-type="heading"><subject>Original Article</subject></subj-group>
<subj-group subj-group-type="badge">
<subject>Data</subject>
<subject>Code</subject>
<subject>Materials</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>PLAViMoP and Eye Tracking: A Method to Integrate 2D Gaze Data Within a C3D File of a Point-Light Display</article-title>
<alt-title alt-title-type="right-running">Plavimop and Eye Tracking</alt-title>
<alt-title specific-use="APA-reference-style" xml:lang="en">Plavimop and eye tracking: A method to integrate 2D gaze data within a C3D file of a point-light display</alt-title>
</title-group>
<contrib-group content-type="authors">
	<contrib id="author-1" contrib-type="author"><contrib-id contrib-id-type="orcid" authenticated="false">https://orcid.org/0000-0003-0977-1541</contrib-id><name name-style="western"><surname>Francisco</surname><given-names>Victor</given-names></name><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib>
<contrib id="author-2" contrib-type="author"><contrib-id contrib-id-type="orcid" authenticated="false">https://orcid.org/0000-0003-0789-7458</contrib-id><name name-style="western"><surname>Dumoncel</surname><given-names>Jean</given-names></name><xref ref-type="aff" rid="aff1">1</xref></contrib>
<contrib id="author-3" contrib-type="author"><contrib-id contrib-id-type="orcid" authenticated="false">https://orcid.org/0000-0003-3849-6489</contrib-id><name name-style="western"><surname>Coudière</surname><given-names>Adrien</given-names></name><xref ref-type="aff" rid="aff1">1</xref></contrib>
<contrib id="author-4" contrib-type="author"><contrib-id contrib-id-type="orcid" authenticated="false">https://orcid.org/0000-0003-4296-7300</contrib-id><name name-style="western"><surname>Danion</surname><given-names>Frédéric</given-names></name><xref ref-type="aff" rid="aff1">1</xref></contrib>
	<contrib id="author-5" contrib-type="author" corresp="yes"><contrib-id contrib-id-type="orcid" authenticated="false">https://orcid.org/0000-0002-4699-179X</contrib-id><name name-style="western"><surname>Bidet-Ildei</surname><given-names>Christel</given-names></name><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="corresp" rid="cor1">*</xref></contrib>
<contrib id="author-6" contrib-type="author"><contrib-id contrib-id-type="orcid" authenticated="false">https://orcid.org/0000-0003-1040-6162</contrib-id><name name-style="western"><surname>Decatoire</surname><given-names>Arnaud</given-names></name><xref ref-type="aff" rid="aff2">2</xref></contrib>
<contrib contrib-type="editor">
<name>
<surname>Fernández-Castilla</surname>
<given-names>Belén</given-names>
</name>
<xref ref-type="aff" rid="aff5"/>
</contrib>
	
<aff id="aff1"><label>1</label><institution>Université de Poitiers - Université de Tours - CNRS - Centre de Recherches sur la Cognition et l’Apprentissage</institution>, <addr-line><city>Poitiers</city></addr-line>, <country country="FR">France</country></aff>
	<aff id="aff2"><label>2</label><institution>Université de Poitiers - ISAE-ENSMA - CNRS - PPRIME</institution>,  <addr-line><city>Poitiers</city></addr-line>, <country country="FR">France</country></aff>
	<aff id="aff3"><label>3</label><institution>Melioris - Centre de Médecine Physique et de Réadaptation Fonctionnelle Le Grand Feu</institution>,  <addr-line><city>Niort</city></addr-line>, <country country="FR">France</country></aff>
	<aff id="aff4"><label>4</label><institution>Institut Universitaire de France (IUF)</institution>,  <addr-line><city>Paris</city></addr-line>, <country country="FR">France</country></aff>
	<aff id="aff5">UNED | Universidad Nacional de Educación a Distancia, Madrid, <country>Spain</country>.</aff>
</contrib-group>
	<author-notes>
		<corresp id="cor1"><label>*</label>Université de Poitiers, MSHS - CeRCA - Bâtiment A5, 5, rue T. Lefebvre, TSA 21103, 86073 Poitiers Cedex 9, France. <email xlink:href="christel.bidet.ildei@univ-poitiers.fr">christel.bidet.ildei@univ-poitiers.fr</email></corresp>
	</author-notes>
<pub-date pub-type="epub"><day>18</day><month>12</month><year>2025</year></pub-date>
<pub-date pub-type="collection" publication-format="electronic"><year>2025</year></pub-date>
<volume>21</volume>
<issue>4</issue>

<fpage>346</fpage>
<lpage>369</lpage>
<history>
<date date-type="received">
<day>20</day>
<month>03</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>28</day>
<month>10</month>
<year>2025</year>
</date>
</history>
<permissions><copyright-year>2025</copyright-year><copyright-holder>Francisco, Dumoncel, Coudière et al.</copyright-holder><license license-type="open-access" specific-use="CC BY 4.0" xlink:href="https://creativecommons.org/licenses/by/4.0/"><ali:license_ref>https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This is an open access article distributed under the terms of the Creative Commons Attribution 4.0 International License, CC BY 4.0, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p></license></permissions>
<abstract>
	<p>Understanding how humans observe and interpret actions is vital for social interaction. Point-light displays (PLDs), which depict actions using only joint movements, are widely used to study this process. Recently, PLAViMoP — an open-access database of 3D PLDs covering everyday actions, fine-motor skills, sports movements, facial expressions, social interactions, and robotic actions — has been introduced to facilitate the use of PLDs. PLAViMoP includes a search engine and metadata for each sequence, including movement type, label, actor sex, and age. As a complement to the database, we present here a novel methodology that integrates eye-tracking data into the PLD reference frame, allowing gaze behavior and action kinematics to be analyzed jointly (i.e., in a unified dataset). This combined approach offers new insights into action perception and has broad applications in health, sports, and occupational settings. It also offers a promising tool for continuous psychophysical studies of the perception of biological movement.</p>
</abstract>
<kwd-group kwd-group-type="author"><kwd>point-light display</kwd><kwd>eye tracking</kwd><kwd>software</kwd><kwd>optimisation</kwd></kwd-group>

</article-meta>
</front>
<body>
	<sec sec-type="intro" id="intro"><title/>
		<p id="S1.p1">The Point-Light Display (PLD) paradigm, introduced by (<xref ref-type="bibr" rid="bib27">Johansson, 1973</xref>), is a method used to study the perception of biological motion (BM) by capturing the kinematic of key joints using illuminated points. This played a decisive role in revealing our innate sensitivity to biological motion (<xref ref-type="bibr" rid="bib52">Simion et al., 2008</xref>; <xref ref-type="bibr" rid="bib56">Vallortigara et al., 2005</xref>), which develops further with age, and is widely used in neuroscience and psychology to investigate perceptual and action recognition processes. Today, advances in technology allow PLDs to be created through algorithms (<xref ref-type="bibr" rid="bib18">Cutting, 1978</xref>) or real motion capture (<xref ref-type="bibr" rid="bib36">Manera et al., 2016</xref>) or mixed, capturing and modifying real movements (<xref ref-type="bibr" rid="bib34">Mahmood et al., 2019</xref>), providing flexibility for experimental research. There are also new AI-based approaches for extracting coordinates from conventional video (such as SmartDetector; <xref ref-type="bibr" rid="bib7">Bidet-Ildei, BenAhmed et al., 2024</xref>). Since 2018, we have been working with several colleagues on facilitating the use of PLD for more precise investigations of human sensitivity to BM (<xref ref-type="bibr" rid="bib7">Bidet-Ildei, BenAhmed et al., 2024</xref>; <xref ref-type="bibr" rid="bib9">Bidet-Ildei, Francisco et al., 2022</xref>; <xref ref-type="bibr" rid="bib19">Decatoire et al., 2018</xref>). Indeed, numerous researchers have consistently investigated the perception of BM for more than five decades. The quest for the understanding of biological movement seems to be a remarkably deep source of inquiry. This depth is partly due to the sensitivity of BM, which flirts with the innate and the acquired. There will be a marked precocity (<xref ref-type="bibr" rid="bib10">Bidet-Ildei et al., 2014</xref>; <xref ref-type="bibr" rid="bib17">Craighero et al., 2016</xref>) that can become more refined with development into adulthood (<xref ref-type="bibr" rid="bib45">Ross et al., 2012</xref>). This particularity allowed for the investigation of numerous cognitive abilities at different developmental stages, notably recognition and discrimination, as well as emotion (<xref ref-type="bibr" rid="bib1">Atkinson et al., 2004</xref>; <xref ref-type="bibr" rid="bib2">Bachmann et al., 2018</xref>; <xref ref-type="bibr" rid="bib58">Venesvirta et al., 2016</xref> for a review), identity (<xref ref-type="bibr" rid="bib5">Beardsworth &amp; Buckner, 1981</xref>, <xref ref-type="bibr" rid="bib16">Coste et al., 2021</xref>, <xref ref-type="bibr" rid="bib49">Sevdalis &amp; Keller, 2011</xref>), sex (<xref ref-type="bibr" rid="bib41">Pollick et al., 2005</xref>: <xref ref-type="bibr" rid="bib46">Runeson &amp; Frykholm, 1983</xref>) and language (<xref ref-type="bibr" rid="bib6">Beauprez et al., 2020</xref>; <xref ref-type="bibr" rid="bib13">Boulenger et al., 2009</xref>). 
Additionally, it has contributed to research on learning including motor learning (<xref ref-type="bibr" rid="bib21">Francisco et al., 2022</xref>; <xref ref-type="bibr" rid="bib25">Horn et al., 2002</xref>); mathematical cognition (<xref ref-type="bibr" rid="bib3">Badets et al., 2012</xref>; <xref ref-type="bibr" rid="bib11">Bidet-Ildei, Vilain et al., 2024</xref>) and memory (<xref ref-type="bibr" rid="bib59">Villatte et al., 2022</xref>) as well as pathological conditions (<xref ref-type="bibr" rid="bib8">Bidet-Ildei, Deborde et al., 2022</xref>; <xref ref-type="bibr" rid="bib22">Francisco et al., 2023</xref>; <xref ref-type="bibr" rid="bib26">Izawa et al., 2012</xref>; <xref ref-type="bibr" rid="bib37">Mazza et al., 2010</xref>). Moreover, PLD is also of interest in other domains, such as computer science and engineering (for a review, see <xref ref-type="bibr" rid="bib61">Yousefi &amp; Loo, 2019</xref>).</p>
<p id="S1.p2">To date, there are already numerous tools available to assist researchers wanting to use kinematics to investigate the understanding of BM by a participant. In this regard, several databases (<xref ref-type="bibr" rid="bib9">Bidet-Ildei, Francisco et al., 2022</xref>; <xref ref-type="bibr" rid="bib33">Ma et al., 2006</xref>; <xref ref-type="bibr" rid="bib35">Manera et al., 2010</xref>; <xref ref-type="bibr" rid="bib38">Okruszek &amp; Chrustowicz, 2020</xref>; <xref ref-type="bibr" rid="bib51">Shipley &amp; Brumberg, 2004</xref>; <xref ref-type="bibr" rid="bib57">Vanrie &amp; Verfaillie, 2004</xref>) and new technologies (<xref ref-type="bibr" rid="bib7">Bidet-Ildei, BenAhmed et al., 2024</xref>; <xref ref-type="bibr" rid="bib19">Decatoire et al., 2018</xref>; <xref ref-type="bibr" rid="bib40">Piwek et al., 2016</xref>; <xref ref-type="bibr" rid="bib50">Shi et al., 2018</xref>; <xref ref-type="bibr" rid="bib55">Thomas &amp; Seiffert, 2010</xref>) have been developed to easily produce their own stimuli. Yet, although current techniques allow to analyse the movement of the eye by means of its relative speed in relation to an element of the video (<xref ref-type="bibr" rid="bib30">Koerfer et al., 2024</xref>; <xref ref-type="bibr" rid="bib53">Souto et al., 2023</xref>), these techniques do not allow to specify the absolute position of the eye in relation to each element of a PLD. Eye-tracking tools record a participant’s eye movements, enabling the analysis of eye kinematics and the determination of gaze direction and positions at a given moment or over a period of time. Traditionally, eye-tracking has been used to analyse eye movements on conventional screens with 2D stimuli. To have a precise idea of gaze direction or gaze location various methodologies have already been proposed in the literature (see for a review, see <xref ref-type="bibr" rid="bib54">Sundstedt &amp; Garro, 2022</xref>). The most common technique is the gaze plot, which consists of superimposing circles on what is seen to indicate fixation points. Fixation maps (<xref ref-type="bibr" rid="bib28">Kasprowski &amp; Harężlak, 2017</xref>; <xref ref-type="bibr" rid="bib42">Rahman et al., 2020</xref>) utilize circles whose radius is proportional to the time spent during each fixation, these circles being numbered to indicate the order of fixations in the scene. It is also possible, and even common, to use the notion of area of interest (AOI). The AOI can be defined either by the experimenter (<xref ref-type="bibr" rid="bib29">King et al., 2019</xref>), who checks the number of times gaze enters and leaves the area, or by an algorithm that calculates this area a posteriori by grouping fixations (clustering).</p>
<p id="S1.p3">At this stage, many visualisation tools or techniques tools are thus available for using gaze trackers. However, until now, all of these have been designed in the context of a static image or natural video. As a result, they are not suitable for observing kinematics via PLDs, despite having a specific original format (C3D stands for “Coordinate 3D” and refers to a standard file format used for storing 3D motion capture data.) that could be exploited. For instance, when watching a PLD depicting a walking action, if gaze is positioned on the thigh, i.e. between hip and knee when the two legs cross each other, it would be very difficult to say which leg was being observed. In the same vein, when using AOI, given a stimulus animated by the movement of the action observed, it would be very complicated to obtain moving AOI capable of following the different segments unless a frame-by-frame labelling is employed. However, PLDs can be viewed as a sequence of images with no context, but where points of interest that have already been labelled (R_hip; R_knee; R_ankle; etc.) are tracked over time.</p>
<p id="S1.p4">To address these issues, while taking advantage of modern PLD acquisition techniques (i.e., 3D motion capture), we propose a new method that directly reconstructs gaze position within the file containing the PLD coordinates (c3d file). This direct reconstruction in the initial kinematic files used as stimuli, which contains the PLD, offers the advantage of keeping all data in a single file (including the possibility of integrating the gaze kinematics of n-participants for the same observed stimulus), thus facilitating direct comparison between gaze and joint positions. Overall, we believe that our new method offers a neat tool for continuous psychophysical studies (<xref ref-type="bibr" rid="bib14">Burge &amp; Bonnen, 2025</xref>) that seek to understand the evolution of eye behaviour in response to the task demand (ex. moment of recognition).</p></sec>
<sec id="s2" sec-type="method"><title>Method</title>
<sec id="s2_1"><title>EyeTracking and Setup</title>
	<p id="S2.SS1.Px1.p1">Participants were comfortably seated in a dark room facing a computer screen (ACER predator, 1920×1080, 27 inch, 240Hz) positioned 57cm away from them (see <xref ref-type="fig" rid="fig-1">Figure 1</xref>). At that viewing distance, a change in gaze of 1 degree corresponds to a displacement of 1 cm on the computer screen. To restrain head movements, participants had a chin rest and a padded forehead rest with gaze naturally oriented toward the center of screen. Horizontal and vertical positions of the right eye were recorded using an infrared video-based eye tracker (Desktop Eyelink 1000 system; SR Research). The output from the eye tracker was calibrated at the beginning of the experiment by recording the raw eye positions as participants fixated a grid composed of 9 known locations. The output of the eye tracker was fed into a data acquisition system (Keithley ADwin Real Time, Tektronix) and was recorded with a custom software (<xref ref-type="bibr" rid="bib20.5">DOCoMETRe, 2025</xref>) at 1000 Hz. Importantly, we ensured that the initiation/ending of eye data collection and display of PLDs were synchronous.</p><fig id="fig-1" position="anchor" orientation="portrait"><label>Figure 1</label><caption><title>Top View of the Experimental Setup</title><p><italic>Note</italic>. Participants sat in front of the screen with their head movements restrained by a headrest positioned 57 cm away from the screen.</p></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f1.png" position="anchor" orientation="portrait"/></fig></sec>
<sec id="s2_2"><title>Video Stimuli Preparation</title>
	<p id="S2.SS2.Px1.p1">Starting from a c3d file, we first created a new “planar” c3d file using MATLAB (MathWorks) in conjunction with the Biomechanical Toolkit (<xref ref-type="bibr" rid="bib4">Barre &amp; Armand, 2014</xref>). This was achieved by projecting each point light onto a camera plane based on the camera’s azimuth and elevation settings (see <xref ref-type="fig" rid="fig-2">Figure 2</xref>). Consequently, coordinates along the orthogonal axis in the camera plane were always set to zero, as illustrated in <xref ref-type="fig" rid="fig-2">Figure 2</xref>d. These new c3d files were then resized and centered to ensure that the point-light display in each file fit within the same bounding box (e.g., width: 1888.6 mm, height: 2016.6 mm). Finally, 2D video files (frame rate: 30 Hz, resolution: 1920 × 1080 pixels) were generated for each stimulus and displayed on the screen.</p><fig id="fig-2" position="anchor" orientation="portrait"><label>Figure 2</label><caption><title>Example of a PLD Depicting a Human Performing a Pedaling Movement</title><p><italic>Note</italic>. (a) The original PLD in the motion capture reference frame from the front view. (b) The right-side view and its corresponding projection in a camera reference frame with azimuth set to 45° and elevation to 20°. (c) The three-quarter front view, which was displayed on the screen for participants. (d) The right-side view demonstrating that the new c3d file is planar. (e) It provides a perspective illustration of the pedaling action from a three-quarter view, accompanied by a kinogram of the lower limbs for better visualization.</p></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f2.png" position="anchor" orientation="portrait"/></fig></sec>
<sec id="s2_3"><title>Inter-Referential Consistency Test (ICT)</title>
	<p id="S2.SS3.Px1.p1">To prevent potential issues arising from the conversion of centimeter-based eye coordinates in the EyeTracker’s screen reference frame to millimeters-based ones in the c3d reference frame, an ICT stimulus (<xref ref-type="fig" rid="fig-3">Figure 3</xref>) was created. The ICT stimulus allows to check referential changes (screen to .c3d file) after conventional eye-tracker calibration. This stimulus was specifically designed to validate the spatial transformation and ensure consistency between coordinate systems. The ICT stimulus consisted in a single point of light moving within the previously described bounding box, starting at the center of the frame and remaining there for five seconds. Additionally, two extra points of light were placed at the bottom-left and top-right corners of the bounding box to allow for manual localization of the video frames displayed on the screen (<xref ref-type="fig" rid="fig-3">Figure 3</xref>). In this way (the ICT stimulus is initially coded in c3d format and then converted to video) we can find out exactly how much space on the screen is occupied by the PLD videos. Each participant completed this trial once before the experimental session, in which they viewed more complex PLDs.</p><fig id="fig-3" position="anchor" orientation="portrait"><label>Figure 3</label><caption><title>Description of the Calibration Stimulus With the Calibration Trajectory and Bounding Box Limits</title><p><italic>Note</italic>. Only the white point was visible to participants during the experiment; the trajectory and additional point lights are included here for illustrative purposes only.</p></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f3.png" position="anchor" orientation="portrait"/></fig></sec>
<sec id="s2_4"><title>Gaze Data Conversion Process</title>
<p id="S2.SS4.Px1.p1">As exposed above, the EyeTracker was calibrated to provide gaze coordinates in centimetres within the computer screen frame of reference, whose origin (0;0) corresponds to the centre of the screen. Here, the positive direction of the X-axis was to the right and that of the Y-axis was to the top. To determine which point(s) of the PLD, participants were looking at during the recognition task, it is mandatory that we employ a common frame of reference for the gaze data (2D) and the PLD (3D), meaning that they rely on a similar origin and units. We opted to convert the gaze coordinates in millimetres so as to match the c3d file reference frame. Below, we present a flow chart that describes the successive stages to achieve that switch in reference frame.</p><fig id="fig-4" position="anchor" orientation="portrait"><label>Figure 4</label><caption><title>Flow Chart of the Conversion Process for the Eye Data</title><p><italic>Note</italic>. For each participant, X and Y offsets and gain factors were identified by the optimisation process through the calibration stimulus. These conversion parameters were applied to all trials of the participant.</p></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f4.png" position="anchor" orientation="portrait"/></fig>
	<p id="S2.SS4.Px1.p2">To ensure a good matching between eye data and the PLD, the participant was first asked to follow the ICT stimulus point-light (implicit smooth pursuit instruction) (<xref ref-type="fig" rid="fig-3">Figure 3</xref>) and the eye’s movement of each participant was recorded. Using the size of the screen as well as its resolution, it was easy to convert the gaze coordinates in centimetres provided by the eyetracker to pixel coordinates (<inline-formula><mml:math id="x1" display="inline"><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula><mml:math id="x2" display="inline"><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> ratio). Then, by using the two additional point-lights in the corners of the ICT stimulus, we computed the ratio between screen sizes and c3d distances (<inline-formula><mml:math id="x3" display="inline"><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula><mml:math id="x4" display="inline"><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> ratio). To justify the need of these two points in the corners, one needs to understand that the projection of a full-screen video keeps its proportions: on a rectangular screen a square video will touch the top/bottom edges, but not the right/left ones. A simple localisation tool developed with <xref ref-type="bibr" rid="bib1.5">AutoIt (2025)</xref> was used to get the mouse coordinates when the two additional point-lights were clicked by the experimenter and then compute <inline-formula><mml:math id="x5" display="inline"><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula><mml:math id="x6" display="inline"><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> ratio. At this point, the eye coordinates are known in the c3d reference frame and expressed in millimetres. Even though great care was taken in calibrating the eye tracker, the eye trajectory obtained in the ICT c3d file after conversion still differed from the target the participants were supposed to follow (see <xref ref-type="fig" rid="fig-5">Figure 5</xref>a). An optimisation algorithm has been written to adjust the eye tracker measurements. 
The optimisation variables are the x and y offsets of the trajectory (<inline-formula><mml:math id="x7" display="inline"><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula><mml:math id="x8" display="inline"><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>) as well as the x and y scale factors (<inline-formula><mml:math id="x9" display="inline"><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula><mml:math id="x10" display="inline"><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>4</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>). Only bound constraints (lb and ub) were set, to [-400 -400 0.5 0.5] and [400 400 1.5 1.5], respectively. The following cost function (f) was used to minimize the gap between the eye trajectory and the target one:</p>
<p id="S2.SS4.Px1.p3"><disp-formula id="e_1">
<mml:math id="x11" display="block"><mml:mtable columnalign="left"><mml:mtr><mml:mtd columnalign="right"/><mml:mtd columnalign="left"><mml:mi>f</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover><mml:mo>∑</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover></mml:mstyle><mml:mfenced separators="" open="[" close="]"><mml:mrow><mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mo>−</mml:mo><mml:msub><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>+</mml:mo><mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo>−</mml:mo><mml:msub><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfenced></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula></p>
<p id="S2.SS4.Px1.p4">With:</p>
<p id="S2.SS4.Px1.p5"><disp-formula><mml:math id="x12" display="block"><mml:mtable columnalign="left"><mml:mtr><mml:mtd columnalign="right"/><mml:mtd columnalign="left"><mml:mfenced separators="" open="{" close=""><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>=</mml:mo><mml:mfenced separators="" open="(" close=")"><mml:mrow><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:msubsup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msubsup><mml:mo>−</mml:mo><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mfenced><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>=</mml:mo><mml:mfenced separators="" open="(" close=")"><mml:mrow><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:msubsup><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:msubsup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msubsup><mml:mo>−</mml:mo><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mfenced><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>4</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mfenced></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula></p>
<p id="S2.SS4.Px1.p6">Where <inline-formula><mml:math id="x13" display="inline"><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula><mml:math id="x14" display="inline"><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> are respectively the optimized <inline-formula><mml:math id="x15" display="inline"><mml:mi>x</mml:mi></mml:math></inline-formula>- and <inline-formula><mml:math id="x16" display="inline"><mml:mi>y</mml:mi></mml:math></inline-formula>-coordinates of the eye in millimetres in the C3D reference frame, <inline-formula><mml:math id="x17" display="inline"><mml:msub><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula><mml:math id="x18" display="inline"><mml:msub><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> are the <inline-formula><mml:math id="x19" display="inline"><mml:mi>x</mml:mi></mml:math></inline-formula>- and <inline-formula><mml:math id="x20" display="inline"><mml:mi>y</mml:mi></mml:math></inline-formula>-coordinates of the target to follow in millimetres in the C3D reference frame, <inline-formula><mml:math id="x21" display="inline"><mml:msubsup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula><mml:math id="x22" display="inline"><mml:msubsup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> are the <inline-formula><mml:math id="x23" display="inline"><mml:mi>x</mml:mi></mml:math></inline-formula>- and <inline-formula><mml:math id="x24" display="inline"><mml:mi>y</mml:mi></mml:math></inline-formula>-coordinates of the eye in centimetres expressed in the screen reference frame, and <inline-formula><mml:math id="x25" display="inline"><mml:mi>n</mml:mi></mml:math></inline-formula> is the number of frames of the C3D file.</p>
<p id="S2.SS4.Px1.p7">The problem was solved using the fmincon function of MATLAB R2021b with the sqp algorithm and <inline-formula><mml:math id="x26" display="inline"><mml:msub><mml:mrow><mml:mi mathvariant="bold">x</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo stretchy="false">[</mml:mo><mml:mn>0</mml:mn><mml:mspace width="2.77695pt"/><mml:mn>0</mml:mn><mml:mspace width="2.77695pt"/><mml:mn>1</mml:mn><mml:mspace width="2.77695pt"/><mml:mn>1</mml:mn><mml:mo stretchy="false">]</mml:mo></mml:math></inline-formula> as the initial guess. <xref ref-type="fig" rid="fig-5">Figure 5</xref>b shows the adjustments of the eye trajectory after the optimisation process. Finally, portions of eye signals containing blinks were removed using the rate of change of pupil diameter, with a threshold set to <inline-formula><mml:math id="x27" display="inline"><mml:mn>5</mml:mn><mml:mspace width="0.3em"/><mml:mtext>cm/s</mml:mtext></mml:math></inline-formula>.</p><fig id="fig-5" position="anchor" orientation="portrait"><label>Figure 5</label><caption><title>Comparison Between the Eye and Target Trajectory, a) Before, b) After the Optimisation Process</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f5.png" position="anchor" orientation="portrait"/></fig>
<p id="S2.SS4.Px1.p8">These four parameters (x and y offsets of the trajectory and the x and y scale factors) obtained with this ICT stimulus should be applied to each trial for a subject throughout the same session. In this way, the kinematics of the eye can be visualised along with the movement seen by the participant (<xref ref-type="fig" rid="fig-6">Figure 6</xref>).</p>
	<p id="S2.SS4.Px1.p9">Although we had to use a specific device described in the “Eye tracking and configuration” section, we are providing the scientific community with a tutorial in the following section explaining how to add the eye trajectory of a subject watching a PLD video directly from the native recordings of the EyeTrackers (edf format).</p>
	<p id="S2.SS4.Px1.p10">Please note that the data set and code specific to our initial experiment (with DOCoMETRe) or related to the tutorial can be found by following the OSF link in this article.</p></sec></sec><?figure fig-5?><?figure fig-6?>
<sec id="s3" sec-type="body"><title>Tutorial</title>
<p id="S3.SS1.Px1.p1">This section details the step-by-step process for adding the gaze movement into an observed c3d file, starting from the records of edf files by EyeLink (all necessary files are available for download at the OSF link).</p><fig id="fig-6" position="anchor" orientation="portrait"><label>Figure 6</label><caption><title>Planar Projection of the Kinogram of the Jumping Movement, With the Participant’s Eye (Red Tail) Reintroduced Into the C3D File</title><p><italic>Note</italic>. This file is the original point-light display file that was used to generate the video stimulus, corresponding to the projection of each point light in the camera plane, depending on the camera’s azimuth and elevation parameters (i.e. the coordinates along the orthogonal axis in the camera plane are always equal to zero). The trajectories of the left and right ankles are displayed to help with recognising the movement.</p></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f6.png" position="anchor" orientation="portrait"/></fig>
	<?pagebreak-before?><p id="S3.SS1.Px1.p2">The prerequisites are:</p>
<list list-type="bullet">
<list-item>
<p>It is assumed that an experimental design has been set up to synchronize the recording of the eye by the EyeLink and the playback of the video via, for example, WebLink or Experiment Builder, which allow tags to be defined to identify the start and end of the presentation of a video stimulus.</p>
	</list-item>
<list-item>
	<p>Download the Biomechanics ToolKit for Matlab, which allows manipulating c3d files, using the following link (<xref ref-type="bibr" rid="bib11.5">Biomechanics ToolKit, 2025</xref>). Unzip the file; the library directory will be referred to as BTK_Path hereafter.</p></list-item>
<list-item>
	<p>Register for free on the SR Research support forum, then download and install the EyeLink development kit (<xref ref-type="bibr" rid="bib53.25">SR Research Support Forum, 2025a</xref>) and the mex files folder for Matlab (<xref ref-type="bibr" rid="bib53.5">SR Research Support Forum, 2025b</xref>). Unzip the file; the mex files directory will be referred to as Mex_Path hereafter.</p></list-item>
<list-item>
<p>Prepare the stimuli as described in the “Video stimuli preparation” section as well as the ICT stimulus as presented in the corresponding section. Ensure that the resolution of the video you create is identical to that of the screen on which it will be played. Make sure that the boundaries of the observed stimulus and the ICT stimulus are identical. Keep the bounding box limits (in mm), which you will be asked to enter later.</p></list-item>
</list>
<p id="S3.SS1.Px1.p3">To facilitate this step, below we provide the procedure and the functions needed to create the stimulus videos and the ICT stimulus.</p>
<p id="S3.SS1.Px1.p4">First, gather all the c3d files that will be used during the experiment into a single folder (called for example “Original C3D”) and create two other folders called for example: “Projected C3D” and “Experimental C3D and videos.”</p>
<p id="S3.SS1.Px1.p5">For each one (c3d files), identify the 2D viewing angle (camera azimuth and elevation) at which you want to generate the stimuli, then use the “Project_C3D” function for each file, which will create a projected C3D in the camera view. Then place these files into the folder “Projected C3D.”</p>
	<p id="S3.SS1.Px1.p6">Finally, call the function “Generate_ICT_and_Stimulus_Videos_and_c3d.” You will need to provide the projected C3D folder, the backup folder for newly created files (for example, “Experimental C3D and videos”), the BTK library folder and the following information (see <xref ref-type="fig" rid="fig-7">Figure 7</xref> below):</p>	
	<fig id="fig-7" position="anchor" orientation="portrait"><label>Figure 7</label><caption><title>Input Screen for Generating ICT, Stimulus Videos and C3D</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f7" position="anchor" orientation="portrait"/></fig>
		
	
<p id="S3.SS1.Px1.p7">Margins represent the minimum distance (in mm) between the edges of the image and the PLD dots. By adjusting the “Point’s velocity of the ICT file,” you can manage how fast (or slow) the target point moves during the inter-referential consistency test.</p>
	<p id="S3.SS1.Px1.p8">Optionally, you can request the creation of the ICT stimulus and experimental c3d and MPEG4 files with the two additional points added in order to verify the spatial concordance between the experimental stimuli and the ICT stimulus (same bounding box). The function outputs the bounding box limits in the Matlab command prompt and creates the video associated with each c3d file, as well as a c3d file and the ICT stimulus video, if requested (see <xref ref-type="fig" rid="fig-8">Figure 8</xref> below).</p><fig id="fig-8" position="anchor" orientation="portrait"><label>Figure 8</label><caption><title>Bounding Box Limits in the Matlab Command Prompt</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f8" position="anchor" orientation="portrait"/></fig>
<list list-type="bullet">
<list-item>
<p>Calibrate the EyeLink following the manufacturer’s recommendations.</p></list-item>
<list-item>
	<p>Use the simple localisation tool (EyeTracker_Calibrate.exe) developed with <xref ref-type="bibr" rid="bib1.5">AutoIt (2025)</xref> to get the mouse coordinates (in pixels) of the two additional point-lights of the ICT stimulus; these coordinates will be saved in a Pos.txt file at the same location as the executable.</p></list-item>
<list-item>
<p>Record the gaze movement of a subject observing the ICT stimulus.</p></list-item>
<list-item>
<p>Record the gaze movement of a subject observing the stimulus.</p></list-item>
</list>
<p id="S3.SS1.Px1.p9">At this stage, you have a Pos.txt file containing the coordinates (in pixels) of the two additional point-lights of the ICT stimulus, two edf files (one for the ICT stimulus called afterwards ICT.edf and the other for the observed stimulus called Stim.edf) and two c3d files corresponding to ICT stimulus and to the observed stimulus. For the rest of this tutorial, we assume that all these files are located in the folder called File_Path afterwards.</p>
<p id="S3.SS1.Px1.p10">With Matlab, prepare the Input variable (type struct) required to call the function AddEye_From_EDF.m as follow:</p>
	<p id="S3.SS1.Px1.p11">Once the Inputs variable is created, just call AddEye_From_EDF(Inputs) in the command Window of Matlab (see <xref ref-type="fig" rid="fig-9">Figure 9</xref> below).</p><fig id="fig-9" position="anchor" orientation="portrait"><label>Figure 9</label><caption><title>Prepare the Input Variable (Type struct) Required to Call the Function AddEye_From_EDF.m</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f9" position="anchor" orientation="portrait"/></fig>
<p id="S3.SS1.Px1.p12">Note that if you do not specify the Inputs variable, the function will ask for files with uigetfile dialog controls, thus just call AddEye_From_EDF() in the command window of Matlab.</p>
	<p id="S3.SS1.Px1.p13">The following input dialog box will be displayed and you will have to fill in all fields with your appropriate settings (here are the default settings corresponding to the dataset provided in this tutorial; see <xref ref-type="fig" rid="fig-10">Figure 10</xref>).</p><fig id="fig-10" position="anchor" orientation="portrait"><label>Figure 10</label><caption><title>Input Box Display to Call AddEye_From_EDF() in the Command Window of Matlab</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f10" position="anchor" orientation="portrait"/></fig>
	<p id="S3.SS1.Px1.p15">After clicking on the OK button, two new c3d files will be created with the suffix “_With eye:” one for the ICT file and one for the observed PLD. A marker represented the movement of the eye has been added into these files (see <xref ref-type="fig" rid="fig-11">Figure 11</xref>).</p><fig id="fig-11" position="anchor" orientation="portrait"><label>Figure 11</label><caption><title>(Left) ICT C3D File With Eye Added in Yellow and Target Marker in Purple, (Right) Observed Stimulus With Eye Added in Yellow</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="meth.17405-f11.png" position="anchor" orientation="portrait"/></fig></sec><?figure fig-10?<?figure fig-11?>
<sec id="s4" sec-type="discussion"><title>Discussion</title>
<p id="S4.SS1.Px1.p1">For the past five decades, researchers have been interested in and have deepened their knowledge of the perception of biological movement. By investigating the reason for this great sensitivity to the biological movement of human (<xref ref-type="bibr" rid="bib10">Bidet-Ildei et al., 2014</xref>; <xref ref-type="bibr" rid="bib17">Craighero et al., 2016</xref>) or non-human animals (<xref ref-type="bibr" rid="bib12">Blake, 1993</xref>; <xref ref-type="bibr" rid="bib32">Lorenzi et al., 2024</xref>; <xref ref-type="bibr" rid="bib43">Regolin et al., 2000</xref>), the scientific community has continued to propose new questions. However, to date, the inability to locate where gaze is directed at during observation represents an obstacle for deeper understanding of biological movement perception, a problem exacerbated by the lack of tools to define areas of interest (AOI) that move. As mentioned in the introduction, the PLD is a sequence of images with multiple labelled AOIs. The second challenge is that the PLD consists of a cloud of points whose meaning emerges only once the participant interprets it based on their own perception. For example, the kinematics (the PLD) of walking in orthogonal projection (not in perspective) and in the transverse plane of the screen (i.e. walking in the direction of the observer) could just be easily perceived as walking forwards or backwards. These particularities make the selection of AOIs more problematic. Using walking kinematics as an example, walking can be interpreted as either a forward movement or a backward movement, thus constituting an ambiguous stimulus. Participants can adopt either interpretation depending on perceptual cues. When the stimuli is perceived as walking forward, attention is probably focused on the lower limbs. In contrast, when the stimuli is perceived walking backward, attention may shift toward more salient features such as trunk or head movements. This example illustrates the inherent difficulty in accurately predicting areas of visual interest (AOI) in perceptual tasks. Here, we propose an initial approach to address this challenge by accounting for the specific nature of PLD stimuli. Our method involves directly reconstructing eye kinematics — represented by a new point — within the PLD’s own kinematics using the stimulus points. It is important to note that this approach was developed based on data from an EyeLink eye tracker and a final output file of digital data. However, the process requires only a file containing the 2D [x,y] coordinates (in centimeters) of the eye on the screen. Additionally, while the present study records the kinematics of a single eye, the same method could be applied to binocular recordings by integrating the coordinate table of the second eye in the same manner. Before reconstructing eye movements within the stimulus, we propose, as a precaution, a specific ICT procedure consisting of a smooth tracking of a single dot (close to a smooth-tracking calibration task, one approach provides a more natural and efficient calibration, while maintaining good accuracy (<xref ref-type="bibr" rid="bib23">Hassoumi et al., 2019</xref>; <xref ref-type="bibr" rid="bib31">Li et al., 2025</xref>). The primary purpose of this ICT is to counteract potential participant-specific drift in tracking, which differs from the standard calibration using a fixation grid (<xref ref-type="bibr" rid="bib24">Hooge et al., 2024</xref>). 
Indeed, calibration by smooth pursuit feels less intrusive and more pleasant for participants, in the sense that it allows calibration without explicit cooperation. Moreover, it is functionally relevant because it is based on the correlation between eye movement and target movement (<xref ref-type="bibr" rid="bib39">Pfeuffer et al., 2013</xref>). However, the real motivation for the ICT was to ensure the correct integration of the homographic coordinates into the stimulus coordinate file itself. While optimizing the calibration is not mandatory, even a small offset can become problematic during the conversion and integration of PLD coordinates and frame rate. Therefore, an optimization step could be implemented to correct any offset, ensuring that this correction is applied to all subsequent conversions and eye movement reconstructions for the participant.</p>
	<p id="S4.SS1.Px1.p2">The present proposal for eye reconstruction in the PLD stimuli, like we introduce above, takes advantage of the particularity of the stimuli. Indeed, a PLD is initially a c3d file. This kind of file contains the 3D coordinates of each point in addition to their individual labels. It should be noted that each point is identifiable and locatable over time, thus each point can be considered as its own AOI. The added new points, corresponding to X and Y coordinates of gaze, can then be directly compared to the other points at each moment. Moreover, the methodology exposed in this paper which allows to insert the eye kinematic data into the c3d file, will be affiliated to a new plug-in of PLAViMoP software (<xref ref-type="bibr" rid="bib19">Decatoire et al., 2018</xref>; <xref ref-type="bibr" rid="bib40.5">PLAViMoP, 2025</xref>). Importantly, on the PLAViMoP platform (<xref ref-type="bibr" rid="bib9">Bidet-Ildei, Francisco et al., 2022</xref>), we provide the algorithm that allows the eye motion reconstruction within the PLD stimulus. In the future, we plan to develop a more user-friendly interface enabling to characterize ocular behaviour such a fixation or saccade, and draw confidence ellipses around a given point using sliders to select thresholds. Indeed, in its current version, you need first to manually identify fixation/saccade in order to visualise it as a new point in the stimulus. In the PLAViMoP tool, it would then be sufficient to colour the two points to visualise the eye, for example blue for fixation and red for saccade, as a single point changing colour. In addition to the visualisation aspect, it would also be very easy to measure the distance of the “fixation” point and the “saccade” point from the other markers.</p>
<p id="S4.SS1.Px1.p3">Finally, our approach enables further investigation into what captures the eye’s attention during action observation, depending on the nature of the task. This issue is important to subsequently explore additional aspects such as the salience features of BM and moments of recognition. One promising approach is to compile a dataset of eye movements across observers in order to identify distinguishing features of gaze in biological motion, in a manner analogous to approaches in face perception. For instance, <xref ref-type="bibr" rid="bib15">Caldara et al. (2010)</xref> showed that people from different cultural backgrounds may have different fixation strategies (e.g., looking at the eyes or the nose) when faced with limited visual conditions. This suggests that eye movement patterns can reflect different perceptual strategies when processing ambiguous stimuli. Moreover, in the context of biological movement, the question of action observation in motor learning (<xref ref-type="bibr" rid="bib44">Rizzolatti et al., 2021</xref>; <xref ref-type="bibr" rid="bib60">Vogt &amp; Thomaschke, 2007</xref>), particularly in sports, arises (<xref ref-type="bibr" rid="bib20">D’Innocenzo et al., 2016</xref>; <xref ref-type="bibr" rid="bib25">Horn et al., 2002</xref>) and understanding where the gaze is directed during observation could provide new and interesting insights. Notably in a recent study (<xref ref-type="bibr" rid="bib21">Francisco et al., 2022</xref>), we demonstrated that action observation combined with physical training could guide the observer’s attentional focus, suggesting that certain regions or points of the PLD might be more informative than others. However, a key missing element is the ability to determine what actually captured the observer’s gaze to support this conclusion. It is possible that the perceived importance of certain parameters is influenced by different kinematic cues, making gaze localization a valuable area of investigation. In the same way, identifying the observer’s gaze location could help optimize action observation training for the rehabilitation of central and peripheral motor disorders (<xref ref-type="bibr" rid="bib47">Ryan et al., 2021</xref>; <xref ref-type="bibr" rid="bib48">Sarasso et al., 2015</xref>).</p>
<p id="S4.SS1.Px1.p4">In summary, the proposed gaze localization methodology allows for the study of gaze behavior during perception, offering insights into the mechanisms involved in processing biological motion.</p></sec>
<sec id="s5" sec-type="conclusion"><title>Conclusion</title>
<p id="S5.SS1.Px1.p1">To conclude, although the analysis of eye movements is believed to enhance the relevance of PLDs for both theoretical and applied research, there is, to date, a lack of methods that align eye movements with PLD. The technique we propose here, which combines 2D eye-tracking data with the PLD’s c3d file within the same reference frame, provides a solution to this gap. This approach opens new avenues for understanding biological movement perception and offers potential applications in fields such as health, sports, and occupational studies.</p>
</sec>
</body>
<back>
<ref-list><title>References</title>
	<ref id="bib1"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Atkinson</surname>, <given-names>A. P.</given-names></string-name>, <string-name name-style="western"><surname>Dittrich</surname>, <given-names>W. H.</given-names></string-name>, <string-name name-style="western"><surname>Gemmell</surname>, <given-names>A. J.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Young</surname>, <given-names>A. W.</given-names></string-name> (<year>2004</year>). <article-title>Emotion perception from dynamic and static body expressions in point-light and full-light displays</article-title>. <source><italic>Perception</italic></source>, <volume>33</volume>(<issue>6</issue>), <fpage>717</fpage>–<lpage>746</lpage>. <pub-id pub-id-type="doi">10.1068/p5096</pub-id></mixed-citation></ref>
	
	<ref id="bib1.5"><mixed-citation publication-type="web">AutoIt. (2025). <italic>AutoIt Freeware BASIC-like Scripting Language</italic>. AutoIt Consulting. <ext-link ext-link-type="uri" xlink:href="https://www.autoitscript.com/site/">https://www.autoitscript.com/site/</ext-link></mixed-citation></ref>
	
	
	<ref id="bib2"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Bachmann</surname>, <given-names>J.</given-names></string-name>, <string-name name-style="western"><surname>Munzert</surname>, <given-names>J.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Krueger</surname>, <given-names>B.</given-names></string-name> ( <year>2018</year>). <article-title>Neural underpinnings of the perception of emotional states derived from biological human motion: A review of neuroimaging research</article-title>. <source>Frontiers in Psychology</source>, <volume>9</volume>, <elocation-id>1763</elocation-id>. <pub-id pub-id-type="doi">10.3389/fpsyg.2018.01763</pub-id></mixed-citation></ref>
	<ref id="bib3"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Badets</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Bouquet</surname>, <given-names>C. A.</given-names></string-name>, <string-name name-style="western"><surname>Ric</surname>, <given-names>F.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Pesenti</surname>, <given-names>M.</given-names></string-name> (<year>2012</year>). <article-title>Number generation bias after action observation</article-title>. <source>Experimental Brain Research</source>, <volume>221</volume>(<issue>1</issue>), <fpage>43</fpage>–<lpage>49</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-012-3145-1</pub-id></mixed-citation></ref>
	<ref id="bib4"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Barre</surname>, <given-names>A.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Armand</surname>, <given-names>S.</given-names></string-name> (<year>2014</year>). <article-title>Biomechanical toolkit: Open-source framework to visualize and process biomechanical data</article-title>. <source>Computer Methods and Programs in Biomedicine</source>, <volume>114</volume>(<issue>1</issue>), <fpage>80</fpage>–<lpage>87</lpage>. <pub-id pub-id-type="doi">10.1016/j.cmpb.2014.01.012</pub-id></mixed-citation></ref>
	<ref id="bib5"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Beardsworth</surname>, <given-names>T.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Buckner</surname>, <given-names>T.</given-names></string-name> (<year>1981</year>). <article-title>The ability to recognize oneself from a video recording of one’s movements without seeing one’s body</article-title>. <source>Bulletin of the Psychonomic Society</source>, <volume>18</volume>(<issue>1</issue>), <fpage>19</fpage>–<lpage>22</lpage>. <pub-id pub-id-type="doi">10.3758/BF03333558</pub-id></mixed-citation></ref>
	<ref id="bib6"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Beauprez</surname>, <given-names>S.-A.</given-names></string-name>, <string-name name-style="western"><surname>Blandin</surname>, <given-names>Y.</given-names></string-name>, <string-name name-style="western"><surname>Almecija</surname>, <given-names>Y.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name> (<year>2020</year>). <article-title>Physical and observational practices of unusual actions prime action verb processing</article-title>. <source>Brain and Cognition</source>, <volume>138</volume>, <elocation-id>103630</elocation-id>. <pub-id pub-id-type="doi">10.1016/j.bandc.2019.103630</pub-id></mixed-citation></ref>
	<ref id="bib7"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name>, <string-name name-style="western"><surname>BenAhmed</surname>, <given-names>O.</given-names></string-name>, <string-name name-style="western"><surname>Bouidaine</surname>, <given-names>D.</given-names></string-name>, <string-name name-style="western"><surname>Francisco</surname>, <given-names>V.</given-names></string-name>, <string-name name-style="western"><surname>Decatoire</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Blandin</surname>, <given-names>Y.</given-names></string-name>, <string-name name-style="western"><surname>Pylouster</surname>, <given-names>J.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Fernandez-Maloigne</surname>, <given-names>C.</given-names></string-name> (<year>2024</year>). <article-title>Smartdetector: Automatic and vision-based approach to point-light display generation for human action perception</article-title>. <source>Behavior Research Methods</source>, <volume>56</volume>, <fpage>8349</fpage>–<lpage>8361</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-024-02478-1</pub-id></mixed-citation></ref>
	<ref id="bib8"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name>, <string-name name-style="western"><surname>Deborde</surname>, <given-names>Q.</given-names></string-name>, <string-name name-style="western"><surname>Francisco</surname>, <given-names>V.</given-names></string-name>, <string-name name-style="western"><surname>Gand</surname>, <given-names>E.</given-names></string-name>, <string-name name-style="western"><surname>Blandin</surname>, <given-names>Y.</given-names></string-name>, <string-name name-style="western"><surname>Delaubier</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Jossart</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Rigoard</surname>, <given-names>P.</given-names></string-name>, <string-name name-style="western"><surname>Billot</surname>, <given-names>M.</given-names></string-name>, &amp; <string-name name-style="western"><surname>David</surname>, <given-names>R.</given-names></string-name> (<year>2022</year>). <article-title>The added value of point-light display observation in total knee arthroplasty rehabilitation program: A prospective randomized controlled pilot study</article-title>. <source>Medicina</source>, <volume>58</volume>(<issue>7</issue>), <elocation-id>868</elocation-id>. <pub-id pub-id-type="doi">10.3390/medicina58070868</pub-id></mixed-citation></ref>
	<ref id="bib9"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name>, <string-name name-style="western"><surname>Francisco</surname>, <given-names>V.</given-names></string-name>, <string-name name-style="western"><surname>Decatoire</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Pylouster</surname>, <given-names>J.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Blandin</surname>, <given-names>Y.</given-names></string-name> (<year>2022</year>). <article-title>Plavimop database: A new continuously assessed and collaborative 3d point-light display dataset</article-title>. <source>Behavior Research Methods</source>, <volume>55</volume>, <fpage>694</fpage>–<lpage>715</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-022-01850-3</pub-id></mixed-citation></ref>
	<ref id="bib10"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name>, <string-name name-style="western"><surname>Kitromilides</surname>, <given-names>E.</given-names></string-name>, <string-name name-style="western"><surname>Orliaguet</surname>, <given-names>J.-P</given-names></string-name>., <string-name name-style="western"><surname>Pavlova</surname>, <given-names>M.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Gentaz</surname>, <given-names>E.</given-names></string-name> (<year>2014</year>). <article-title>Preference for point-light human biological motion in newborns: Contribution of translational displacement</article-title>. <source>Developmental Psychology</source>, <volume>50</volume>(<issue>1</issue>), <fpage>113</fpage>–<lpage>120</lpage>. <pub-id pub-id-type="doi">10.1037/a0032956</pub-id></mixed-citation></ref>
	<ref id="bib11"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name>, <string-name name-style="western"><surname>Vilain</surname>, <given-names>C.</given-names></string-name>, <string-name name-style="western"><surname>Fevin</surname>, <given-names>S.</given-names></string-name>, <string-name name-style="western"><surname>Francisco</surname>, <given-names>V.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Vibert</surname>, <given-names>N.</given-names></string-name> (<year>2024</year>). <article-title>The role of finger kinematics in the acquisition of number meaning in kindergarten: A pilot study</article-title>. <source>Frontiers in Education</source>, <volume>8</volume>, <elocation-id>1252731</elocation-id>.  <pub-id pub-id-type="doi">10.3389/feduc.2023.1252731</pub-id></mixed-citation></ref>
	
	
	<ref id="bib11.5"><mixed-citation publication-type="web">Biomechanics ToolKit. (2025). <italic>b-tk for Matlab</italic>. Google Code | Archive. <ext-link ext-link-type="uri" xlink:href="https://code.google.com/archive/p/b-tk/downloads">https://code.google.com/archive/p/b-tk/downloads</ext-link></mixed-citation></ref>
	
	
	<ref id="bib12"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Blake</surname>, <given-names>R.</given-names></string-name> (<year>1993</year>). <article-title>Cats perceive biological motion</article-title>. <source>Psychological Science</source>, <volume>4</volume>(<issue>1</issue>), <fpage>54</fpage>–<lpage>57</lpage>.<pub-id pub-id-type="doi">10.1111/j.1467-9280.1993.tb00557.x</pub-id></mixed-citation></ref>
	<ref id="bib13"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Boulenger</surname>, <given-names>V.</given-names></string-name>, <string-name name-style="western"><surname>Hauk</surname>, <given-names>O.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Pulvermüller</surname>, <given-names>F.</given-names></string-name> (<year>2009</year>). <article-title>Grasping ideas with the motor system: Semantic somatotopy in idiom comprehension</article-title>. <source>Cerebral Cortex</source>, <volume>19</volume>(<issue>8</issue>), <fpage>1905</fpage>–<lpage>1914</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhn217</pub-id></mixed-citation></ref>
	<ref id="bib14"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Burge</surname>, <given-names>J.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Bonnen</surname>, <given-names>K.</given-names></string-name> (<year>2025</year>). <article-title>Continuous psychophysics: Past, present, future</article-title>. <source>Trends in Cognitive Sciences</source>, <volume>29</volume>(<issue>5</issue>), <fpage>481</fpage>–<lpage>493</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2025.01.005</pub-id></mixed-citation></ref>
	<ref id="bib15"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Caldara</surname>, <given-names>R.</given-names></string-name>, <string-name name-style="western"><surname>Zhou</surname>, <given-names>X.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Miellet</surname>, <given-names>S.</given-names></string-name> (<year>2010</year>). <article-title>Putting culture under the ”spotlight” reveals universal information use for face recognition</article-title>. <source>PLoS ONE</source>, <volume>5</volume>(<issue>3</issue>), <elocation-id>e9708</elocation-id>. <pub-id pub-id-type="doi">10.1371/journal.pone.0009708</pub-id></mixed-citation></ref>
	<ref id="bib16"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Coste</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Bardy</surname>, <given-names>B. G.</given-names></string-name>, <string-name name-style="western"><surname>Janaqi</surname>, <given-names>S.</given-names></string-name>, <string-name name-style="western"><surname>Słowiński</surname>, <given-names>P.</given-names></string-name>, <string-name name-style="western"><surname>Tsaneva-Atanasova</surname>, <given-names>K.</given-names></string-name>, <string-name name-style="western"><surname>Goupil</surname>, <given-names>J. L.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Marin</surname>, <given-names>L.</given-names></string-name> (<year>2021</year>). <article-title>Decoding identity from motion: How motor similarities colour our perception of self and others</article-title>. <source>Psychological Research</source>, <volume>85</volume>(<issue>2</issue>), <fpage>509</fpage>–<lpage>519</lpage>. <pub-id pub-id-type="doi">10.1007/s00426-020-01290-8</pub-id></mixed-citation></ref>
	<ref id="bib17"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Craighero</surname>, <given-names>L.</given-names></string-name>, <string-name name-style="western"><surname>Lunghi</surname>, <given-names>M.</given-names></string-name>, <string-name name-style="western"><surname>Leo</surname>, <given-names>I.</given-names></string-name>, <string-name name-style="western"><surname>Ghirardi</surname>, <given-names>V.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Simion</surname>, <given-names>F.</given-names></string-name> (<year>2016</year>). <article-title>Newborns’ attention is driven by the translational movement</article-title>. <source>Visual Cognition</source>, <volume>24</volume>(<issue>9</issue>), <fpage>487</fpage>–<lpage>498</lpage>. <pub-id pub-id-type="doi">10.1080/13506285.2017.1322651</pub-id></mixed-citation></ref>
	<ref id="bib18"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Cutting</surname>, <given-names>J. E.</given-names></string-name> (<year>1978</year>). <article-title>Generation of synthetic male and female walkers through manipulation of a biomechanical invariant</article-title>. <source>Perception</source>, <volume>7</volume>(<issue>4</issue>), <fpage>393</fpage>–<lpage>405</lpage>. <pub-id pub-id-type="doi">10.1068/p070393</pub-id> </mixed-citation></ref>
	<ref id="bib19"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Decatoire</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Beauprez</surname>, <given-names>S.-A.</given-names></string-name>, <string-name name-style="western"><surname>Pylouster</surname>, <given-names>J.</given-names></string-name>, <string-name name-style="western"><surname>Lacouture</surname>, <given-names>P.</given-names></string-name>, <string-name name-style="western"><surname>Blandin</surname>, <given-names>Y.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name> (<year>2018</year>). <article-title>Plavimop: How to standardize and simplify the use of point-light displays</article-title>. <source>Behavior Research Methods</source>, <volume>51</volume>(<issue>6</issue>), <fpage>2573</fpage>–<lpage>2596</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-018-1112-x</pub-id></mixed-citation></ref>
	<ref id="bib20"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>D’Innocenzo</surname>, <given-names>G.</given-names></string-name>, <string-name name-style="western"><surname>Gonzalez</surname>, <given-names>C. C.</given-names></string-name>, <string-name name-style="western"><surname>Williams</surname>, <given-names>A. M.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Bishop</surname>, <given-names>D. T.</given-names></string-name> (<year>2016</year>). <article-title>Looking to learn: The effects of visual guidance on observational learning of the golf swing</article-title>. <source>PLoS ONE</source>, <volume>11</volume>(<issue>5</issue>), <elocation-id>e0155442</elocation-id>. <pub-id pub-id-type="doi">10.1371/journal.pone.0155442</pub-id></mixed-citation></ref>	
	<ref id="bib20.5"><mixed-citation publication-type="web">DOCoMETRe. (2025). <italic>fbuloup/DOCoMETRe: Data acquisition software</italic> GitHub. <ext-link ext-link-type="uri" xlink:href="https://github.com/fbuloup/DOCoMETRe">https://github.com/fbuloup/DOCoMETRe</ext-link></mixed-citation></ref>
	
	<ref id="bib20.75"><mixed-citation publication-type="web">Francisco, V., Bidet-Ildei, C., &amp; Arnaud, D. (2025). <italic>PLAViMoP and Eye Tracking: A method to integrate 2D gaze data within a C3D file of a point-light display</italic> [OSF project page containing datasets, headlines, additional online materials, and code]. Open Science Framework. <pub-id pub-id-type="doi">10.17605/OSF.IO/ND64P</pub-id></mixed-citation></ref>
		
	<ref id="bib21"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Francisco</surname>, <given-names>V.</given-names></string-name>, <string-name name-style="western"><surname>Decatoire</surname>, <given-names>A.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name> (<year>2022</year>). <article-title>Action observation and motor learning: The role of action observation in learning judo techniques</article-title>. <source>European Journal of Sport Science</source>, <volume>23</volume>(<issue>3</issue>), <fpage>319</fpage>–<lpage>329</lpage>. <pub-id pub-id-type="doi">10.1080/17461391.2022.2036816</pub-id></mixed-citation></ref>
	<ref id="bib22"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Francisco</surname>, <given-names>V.</given-names></string-name>, <string-name name-style="western"><surname>Louis</surname>, <given-names>F.</given-names></string-name>, <string-name name-style="western"><surname>David</surname>, <given-names>R.</given-names></string-name>, <string-name name-style="western"><surname>Billot</surname>, <given-names>M.</given-names></string-name>, <string-name name-style="western"><surname>Rouquette</surname>, <given-names>A.-L.</given-names></string-name>, <string-name name-style="western"><surname>Broc</surname>, <given-names>L.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name> (<year>2023</year>). <article-title>Point-light display: A new tool to improve verb recovery in patients with aphasia? A pilot study</article-title>. <source>Experimental Brain Research</source>, <volume>241</volume>, <fpage>1329</fpage>–<lpage>1337</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-023-06607-8</pub-id></mixed-citation></ref>
	<ref id="bib23"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Hassoumi</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Peysakhovich</surname>, <given-names>V.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Hurter</surname>, <given-names>C.</given-names></string-name> (<year>2019</year>). <article-title>Improving eye-tracking calibration accuracy using symbolic regression</article-title>. <source>PLoS ONE</source>, <volume>14</volume>(<issue>3</issue>), <elocation-id>e0213675</elocation-id>. <pub-id pub-id-type="doi">10.1371/journal.pone.0213675</pub-id></mixed-citation></ref>
	<ref id="bib24"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Hooge</surname>, <given-names>I. T. C.</given-names></string-name>, <string-name name-style="western"><surname>Hessels</surname>, <given-names>R. S.</given-names></string-name>, <string-name name-style="western"><surname>Niehorster</surname>, <given-names>D. C.</given-names></string-name>, <string-name name-style="western"><surname>Andersson</surname>, <given-names>R.</given-names></string-name>, <string-name name-style="western"><surname>Skrok</surname>, <given-names>M. K.</given-names></string-name>, <string-name name-style="western"><surname>Konklewski</surname>, <given-names>R.</given-names></string-name>, <string-name name-style="western"><surname>Stremplewski</surname>, <given-names>P.</given-names></string-name>, <string-name name-style="western"><surname>Nowakowski</surname>, <given-names>M.</given-names></string-name>, <string-name name-style="western"><surname>Tamborski</surname>, <given-names>S.</given-names></string-name>, <string-name name-style="western"><surname>Szkulmowska</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Szkulmowski</surname>, <given-names>M.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Nyström</surname>, <given-names>M.</given-names></string-name> (<year>2024</year>). <article-title>Eye tracker calibration: How well can humans refixate a target?</article-title> <source>Behavior Research Methods</source>, <volume>57</volume>(<issue>1</issue>), <elocation-id>23</elocation-id>. <pub-id pub-id-type="doi">10.3758/s13428-024-02564-4</pub-id></mixed-citation></ref>
	<ref id="bib25"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Horn</surname>, <given-names>R. R.</given-names></string-name>, <string-name name-style="western"><surname>Williams</surname>, <given-names>A. M.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Scott</surname>, <given-names>M. A.</given-names></string-name> (<year>2002</year>). <article-title>Learning from demonstrations: The role of visual search during observational learning from video and point-light models</article-title>. <source>Journal of Sports Sciences</source>, <volume>20</volume>(<issue>3</issue>), <fpage>253</fpage>–<lpage>269</lpage>. <pub-id pub-id-type="doi">10.1080/026404102317284808</pub-id></mixed-citation></ref>
	<ref id="bib26"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Izawa</surname>, <given-names>J.</given-names></string-name>, <string-name name-style="western"><surname>Pekny</surname>, <given-names>S. E.</given-names></string-name>, <string-name name-style="western"><surname>Marko</surname>, <given-names>M. K.</given-names></string-name>, <string-name name-style="western"><surname>Haswell</surname>, <given-names>C. C.</given-names></string-name>, <string-name name-style="western"><surname>Shadmehr</surname>, <given-names>R.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Mostofsky</surname>, <given-names>S. H.</given-names></string-name> (<year>2012</year>). <article-title>Motor learning relies on integrated sensory inputs in ADHD, but over-selectively on proprioception in autism spectrum conditions</article-title>. <source>Autism Research: Official Journal of the International Society for Autism Research</source>, <volume>5</volume>(<issue>2</issue>), <fpage>124</fpage>–<lpage>136</lpage>. <pub-id pub-id-type="doi">10.1002/aur.1222</pub-id></mixed-citation></ref>
	<ref id="bib27"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Johansson</surname>, <given-names>G.</given-names></string-name> (<year>1973</year>). <article-title>Visual perception of biological motion and a model for its analysis</article-title>. <source>Perception &amp; Psychophysics</source>, <volume>14</volume>(<issue>2</issue>), <fpage>201</fpage>–<lpage>211</lpage>. <pub-id pub-id-type="doi">10.3758/BF03212378</pub-id> </mixed-citation></ref>
	<ref id="bib28"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Kasprowski</surname>, <given-names>P.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Harężlak</surname>, <given-names>K.</given-names></string-name> (<year>2017</year>). <article-title>Gaze self-similarity plot — A new visualization technique</article-title>. <source>Journal of Eye Movement Research</source>, <volume>10</volume>(<issue>5</issue>), <fpage>1</fpage>–<lpage>14</lpage>. <pub-id pub-id-type="doi">10.16910/jemr.10.5.3</pub-id></mixed-citation></ref>
	<ref id="bib29"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>King</surname>, <given-names>A. J.</given-names></string-name>, <string-name name-style="western"><surname>Bol</surname>, <given-names>N.</given-names></string-name>, <string-name name-style="western"><surname>Cummins</surname>, <given-names>R. G.</given-names></string-name>, &amp; <string-name name-style="western"><surname>John</surname>, <given-names>K. K.</given-names></string-name> (<year>2019</year>). <article-title>Improving visual behavior research in communication science: An overview, review, and reporting recommendations for using eye-tracking methods</article-title>. <source>Communication Methods and Measures</source>, <volume>13</volume>(<issue>3</issue>), <fpage>149</fpage>–<lpage>177</lpage>. <pub-id pub-id-type="doi">10.1080/19312458.2018.1558194</pub-id></mixed-citation></ref>
	<ref id="bib30"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Koerfer</surname>, <given-names>K.</given-names></string-name>, <string-name name-style="western"><surname>Watson</surname>, <given-names>T.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Lappe</surname>, <given-names>M.</given-names></string-name> (<year>2024</year>). <article-title>Inability to pursue nonrigid motion produces instability of spatial perception</article-title>. <source>Science Advances</source>, <volume>10</volume>(<issue>45</issue>), <elocation-id>eadp6204</elocation-id>. <pub-id pub-id-type="doi">10.1126/sciadv.adp6204</pub-id></mixed-citation></ref>
	<ref id="bib31"><mixed-citation publication-type="proceedings"><string-name name-style="western"><surname>Li</surname>, <given-names>Y.</given-names></string-name>, <string-name name-style="western"><surname>Guo</surname>, <given-names>L.</given-names></string-name>, <string-name name-style="western"><surname>Sun</surname>, <given-names>G.</given-names></string-name>, <string-name name-style="western"><surname>Fu</surname>, <given-names>R.</given-names></string-name>, <string-name name-style="western"><surname>Yan</surname>, <given-names>Z.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Liang</surname>, <given-names>J.</given-names></string-name> (<year>2025</year>). <article-title>Eye tracking calibration based on smooth pursuit with regulated visual guidance</article-title>. <source>Proceedings of the 14th International Joint Conference on Computational Intelligence - ROBOVIS</source>, (pp. 417–425). <pub-id pub-id-type="doi">10.5220/0011524900003332</pub-id></mixed-citation></ref>
	<ref id="bib32"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Lorenzi</surname>, <given-names>E.</given-names></string-name>, <string-name name-style="western"><surname>Nadalin</surname>, <given-names>G.</given-names></string-name>, <string-name name-style="western"><surname>Morandi-Raikova</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Mayer</surname>, <given-names>U.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Vallortigara</surname>, <given-names>G.</given-names></string-name> (<year>2024</year>). <article-title>Noncortical coding of biological motion in newborn chicks’ brain</article-title>. <source>Cerebral Cortex</source>, <volume>34</volume>(<issue>6</issue>), <elocation-id>bhae262</elocation-id>. <pub-id pub-id-type="doi">10.1093/cercor/bhae262</pub-id></mixed-citation></ref>
	<ref id="bib33"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Ma</surname>, <given-names>Y.</given-names></string-name>, <string-name name-style="western"><surname>Paterson</surname>, <given-names>H. M.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Pollick</surname>, <given-names>F. E.</given-names></string-name> (<year>2006</year>). <article-title>A motion capture library for the study of identity, gender, and emotion perception from biological motion</article-title>. <source><italic>Behavior Research Methods</italic></source>, <volume>38</volume>(<issue>1</issue>), <fpage>134</fpage>–<lpage>141</lpage>. <pub-id pub-id-type="doi">10.3758/bf03192758</pub-id></mixed-citation></ref>
	<ref id="bib34"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Mahmood</surname>, <given-names>N.</given-names></string-name>, <string-name name-style="western"><surname>Ghorbani</surname>, <given-names>N.</given-names></string-name>, <string-name name-style="western"><surname>Troje</surname>, <given-names>N. F.</given-names></string-name>, <string-name name-style="western"><surname>Pons-Moll</surname>, <given-names>G.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Black</surname>, <given-names>M. J.</given-names></string-name> (<year>2019</year>). <article-title>AMASS: Archive of Motion Capture as Surface Shapes</article-title>. <source>Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)</source>. <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1904.03278">https://arxiv.org/abs/1904.03278</ext-link> </mixed-citation></ref>
	<ref id="bib35"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Manera</surname>, <given-names>V.</given-names></string-name>, <string-name name-style="western"><surname>Schouten</surname>, <given-names>B.</given-names></string-name>, <string-name name-style="western"><surname>Becchio</surname>, <given-names>C.</given-names></string-name>, <string-name name-style="western"><surname>Bara</surname>, <given-names>B. G.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Verfaillie</surname>, <given-names>K.</given-names></string-name> (<year>2010</year>). <article-title>Inferring intentions from biological motion: A stimulus set of point-light communicative interactions</article-title>. <source>Behavior Research Methods</source>, <volume>42</volume>(<issue>1</issue>), <fpage>168</fpage>–<lpage>178</lpage>. <pub-id pub-id-type="doi">10.3758/BRM.42.1.168</pub-id></mixed-citation></ref>
	<ref id="bib36"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Manera</surname>, <given-names>V.</given-names></string-name>, <string-name name-style="western"><surname>von der Lühe</surname>, <given-names>T.</given-names></string-name>, <string-name name-style="western"><surname>Schilbach</surname>, <given-names>L.</given-names></string-name>, <string-name name-style="western"><surname>Verfaillie</surname>, <given-names>K.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Becchio</surname>, <given-names>C.</given-names></string-name> (<year>2016</year>). <article-title>Communicative interactions in point-light displays: Choosing among multiple response alternatives</article-title>. <source>Behavior Research Methods</source>, <volume>48</volume>(<issue>4</issue>), <fpage>1580</fpage>–<lpage>1590</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-015-0669-x</pub-id></mixed-citation></ref>
	<ref id="bib37"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Mazza</surname>, <given-names>M.</given-names></string-name>, <string-name name-style="western"><surname>Lucci</surname>, <given-names>G.</given-names></string-name>, <string-name name-style="western"><surname>Pacitti</surname>, <given-names>F.</given-names></string-name>, <string-name name-style="western"><surname>Pino</surname>, <given-names>M. C.</given-names></string-name>, <string-name name-style="western"><surname>Mariano</surname>, <given-names>M.</given-names></string-name>, <string-name name-style="western"><surname>Casacchia</surname>, <given-names>M.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Roncone</surname>, <given-names>R.</given-names></string-name> (<year>2010</year>). <article-title>Could schizophrenic subjects improve their social cognition abilities only with observation and imitation of social situations?</article-title> <source>Neuropsychological Rehabilitation</source>, <volume>20</volume>(<issue>5</issue>), <fpage>675</fpage>–<lpage>703</lpage>. <pub-id pub-id-type="doi">10.1080/09602011.2010.486284</pub-id></mixed-citation></ref>
	<ref id="bib38"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Okruszek</surname>, <given-names>Ł.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Chrustowicz</surname>, <given-names>M.</given-names></string-name> (<year>2020</year>). <article-title>Social perception and interaction database — A novel tool to study social cognitive processes with point-light displays</article-title>. <source>Frontiers in Psychiatry</source>, <volume>11</volume>, <elocation-id>123</elocation-id>. <pub-id pub-id-type="doi">10.3389/fpsyt.2020.00123</pub-id></mixed-citation></ref>
	<ref id="bib39"><mixed-citation publication-type="proceedings"><string-name name-style="western"><surname>Pfeuffer</surname>, <given-names>K.</given-names></string-name>, <string-name name-style="western"><surname>Vidal</surname>, <given-names>M.</given-names></string-name>, <string-name name-style="western"><surname>Turner</surname>, <given-names>J.</given-names></string-name>, <string-name name-style="western"><surname>Bulling</surname>, <given-names>A.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Gellersen</surname>, <given-names>H.</given-names></string-name> (<year>2013</year>). <article-title>Pursuit calibration: Making gaze calibration less tedious and more flexible</article-title>. <source>Proceedings of the 26th Annual ACM Symposium on User Interface Software and Technology</source>, (pp. 261–270). <pub-id pub-id-type="doi">10.1145/2501988.2501998</pub-id></mixed-citation></ref>
	<ref id="bib40"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Piwek</surname>, <given-names>L.</given-names></string-name>, <string-name name-style="western"><surname>Petrini</surname>, <given-names>K.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Pollick</surname>, <given-names>F.</given-names></string-name> (<year>2016</year>). <article-title>A dyadic stimulus set of audiovisual affective displays for the study of multisensory, emotional, social interactions</article-title>. <source>Behavior Research Methods</source>, <volume>48</volume>(<issue>4</issue>), <fpage>1285</fpage>–<lpage>1295</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-015-0654-4</pub-id></mixed-citation></ref>
		
	<ref id="bib40.5"><mixed-citation publication-type="web">PLAViMoP. (2025). <italic>Point Light Action Visualization and Modification Platform</italic>. Université de Poitiers.  <ext-link ext-link-type="uri" xlink:href="https://plavimop.prd.fr/en/motions">https://plavimop.prd.fr/en/motions</ext-link></mixed-citation></ref>
		
<ref id="bib41"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Pollick</surname>, <given-names>F. E.</given-names></string-name>, <string-name name-style="western"><surname>Kay</surname>, <given-names>J. W.</given-names></string-name>, <string-name name-style="western"><surname>Heim</surname>, <given-names>K.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Stringer</surname>, <given-names>R.</given-names></string-name> (<year>2005</year>). <article-title>Gender recognition from point-light walkers</article-title>. <source>Journal of Experimental Psychology: Human Perception and Performance</source>, <volume>31</volume>(<issue>6</issue>), <fpage>1247</fpage>–<lpage>1265</lpage>.</mixed-citation></ref>
	<ref id="bib42"><mixed-citation publication-type="proceedings"><string-name name-style="western"><surname>Rahman</surname>, <given-names>Y.</given-names></string-name>, <string-name name-style="western"><surname>Asish</surname>, <given-names>S. M.</given-names></string-name>, <string-name name-style="western"><surname>Fisher</surname>, <given-names>N. P.</given-names></string-name>, <string-name name-style="western"><surname>Bruce</surname>, <given-names>E. C.</given-names></string-name>, <string-name name-style="western"><surname>Kulshreshth</surname>, <given-names>A. K.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Borst</surname>, <given-names>C. W.</given-names></string-name> (<year>2020</year>). <article-title>Exploring eye gaze visualization techniques for identifying distracted students in educational VR</article-title>. <source>2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)</source>, (pp. 868–877). <pub-id pub-id-type="doi">10.1109/VR46266.2020.00009</pub-id></mixed-citation></ref>
	<ref id="bib43"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Regolin</surname>, <given-names>L.</given-names></string-name>, <string-name name-style="western"><surname>Tommasi</surname>, <given-names>L.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Vallortigara</surname>, <given-names>G.</given-names></string-name> (<year>2000</year>). <article-title>Visual perception of biological motion in newly hatched chicks as revealed by an imprinting procedure</article-title>. <source><italic>Animal Cognition</italic></source>, <volume>3</volume>, <fpage>53</fpage>–<lpage>60</lpage>. <pub-id pub-id-type="doi">10.1007/s100710050050</pub-id> </mixed-citation></ref>
	<ref id="bib44"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Rizzolatti</surname>, <given-names>G.</given-names></string-name>, <string-name name-style="western"><surname>Fabbri-Destro</surname>, <given-names>M.</given-names></string-name>, <string-name name-style="western"><surname>Nuara</surname>, <given-names>A.</given-names></string-name>, <string-name name-style="western"><surname>Gatti</surname>, <given-names>R.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Avanzini</surname>, <given-names>P.</given-names></string-name> (<year>2021</year>). <article-title>The role of mirror mechanism in the recovery, maintenance, and acquisition of motor abilities</article-title>. <source>Neuroscience &amp; Biobehavioral Reviews</source>, <volume>127</volume>, <fpage>404</fpage>–<lpage>423</lpage>. <pub-id pub-id-type="doi">10.1016/j.neubiorev.2021.04.024</pub-id></mixed-citation></ref>
	<ref id="bib45"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Ross</surname>, <given-names>P. D.</given-names></string-name>, <string-name name-style="western"><surname>Polson</surname>, <given-names>L.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Grosbras</surname>, <given-names>M.-H.</given-names></string-name> (<year>2012</year>). <article-title>Developmental changes in emotion recognition from full-light and point-light displays of body movement</article-title>. <source>PLoS ONE</source>, <volume>7</volume>(<issue>9</issue>), <elocation-id>e44815</elocation-id>. <pub-id pub-id-type="doi">10.1371/journal.pone.0044815</pub-id></mixed-citation></ref>
	<ref id="bib46"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Runeson</surname>, <given-names>S.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Frykholm</surname>, <given-names>G.</given-names></string-name> (<year>1983</year>). <article-title>Kinematic specification of dynamics as an informational basis for person and action perception: Expectation, gender recognition, and deceptive intention</article-title>. <source>Journal of Experimental Psychology: General</source>, <volume>112</volume>(<issue>4</issue>), <fpage>585</fpage>–<lpage>615</lpage>. <pub-id pub-id-type="doi">10.1037/0096-3445.112.4.585</pub-id></mixed-citation></ref>
	<ref id="bib47"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Ryan</surname>, <given-names>D.</given-names></string-name>, <string-name name-style="western"><surname>Fullen</surname>, <given-names>B.</given-names></string-name>, <string-name name-style="western"><surname>Rio</surname>, <given-names>E.</given-names></string-name>, <string-name name-style="western"><surname>Segurado</surname>, <given-names>R.</given-names></string-name>, <string-name name-style="western"><surname>Stokes</surname>, <given-names>D.</given-names></string-name>, &amp; <string-name name-style="western"><surname>O’Sullivan</surname>, <given-names>C.</given-names></string-name> (<year>2021</year>). <article-title>Effect of action observation therapy in the rehabilitation of neurologic and musculoskeletal conditions: A systematic review</article-title>. <source>Archives of Rehabilitation Research and Clinical Translation</source>, <volume>3</volume>(<issue>1</issue>), <elocation-id>100106</elocation-id>. <pub-id pub-id-type="doi">10.1016/j.arrct.2021.100106</pub-id></mixed-citation></ref>
	<ref id="bib48"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Sarasso</surname>, <given-names>E.</given-names></string-name>, <string-name name-style="western"><surname>Gemma</surname>, <given-names>M.</given-names></string-name>, <string-name name-style="western"><surname>Agosta</surname>, <given-names>F.</given-names></string-name>, <string-name name-style="western"><surname>Filippi</surname>, <given-names>M.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Gatti</surname>, <given-names>R.</given-names></string-name> (<year>2015</year>). <article-title>Action observation training to improve motor function recovery: A systematic review</article-title>. <source>Archives of Physiotherapy</source>, <volume>5</volume>(<issue>1</issue>), <elocation-id>14</elocation-id>. <pub-id pub-id-type="doi">10.1186/s40945-015-0013-x</pub-id></mixed-citation></ref>
	<ref id="bib49"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Sevdalis</surname>, <given-names>V.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Keller</surname>, <given-names>P. E.</given-names></string-name> (<year>2011</year>). <article-title>Perceiving performer identity and intended expression intensity in point-light displays of dance</article-title>. <source>Psychological Research</source>, <volume>75</volume>(<issue>5</issue>), <fpage>423</fpage>–<lpage>434</lpage>. <pub-id pub-id-type="doi">10.1007/s00426-010-0312-5</pub-id></mixed-citation></ref>
	<ref id="bib50"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Shi</surname>, <given-names>Y.</given-names></string-name>, <string-name name-style="western"><surname>Ma</surname>, <given-names>X.</given-names></string-name>, <string-name name-style="western"><surname>Ma</surname>, <given-names>Z.</given-names></string-name>, <string-name name-style="western"><surname>Wang</surname>, <given-names>J.</given-names></string-name>, <string-name name-style="western"><surname>Yao</surname>, <given-names>N.</given-names></string-name>, <string-name name-style="western"><surname>Gu</surname>, <given-names>Q.</given-names></string-name>, <string-name name-style="western"><surname>Wang</surname>, <given-names>C.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Gao</surname>, <given-names>Z.</given-names></string-name> (<year>2018</year>). <article-title>Using a kinect sensor to acquire biological motion: Toolbox and evaluation</article-title>. <source>Behavior Research Methods</source>, <volume>50</volume>(<issue>2</issue>), <fpage>518</fpage>–<lpage>529</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-017-0883-9</pub-id></mixed-citation></ref>
	<ref id="bib51"><mixed-citation publication-type="web"><string-name name-style="western"><surname>Shipley</surname>, <given-names>T. F.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Brumberg</surname>, <given-names>J. S.</given-names></string-name> (<year>2004</year>). <italic>Markerless motion-capture for point-light displays</italic>. Bournemouth University. <ext-link ext-link-type="uri" xlink:href="https://nccastaff.bournemouth.ac.uk/hncharif/MathsCGs/Desktop/Research/Markless%20Motion%20Capture/MarkerlessMoCap-02.pdf">https://nccastaff.bournemouth.ac.uk/hncharif/MathsCGs/Desktop/Research/Markless%20Motion%20Capture/MarkerlessMoCap-02.pdf</ext-link></mixed-citation></ref>
	<ref id="bib52"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Simion</surname>, <given-names>F.</given-names></string-name>, <string-name name-style="western"><surname>Regolin</surname>, <given-names>L.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Bulf</surname>, <given-names>H.</given-names></string-name> (<year>2008</year>). <article-title>A predisposition for biological motion in the newborn baby</article-title>. <source>Proceedings of the National Academy of Sciences of the United States of America</source>, <volume>105</volume>(<issue>2</issue>), <fpage>809</fpage>–<lpage>813</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.0707021105</pub-id></mixed-citation></ref>
	<ref id="bib53"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Souto</surname>, <given-names>D.</given-names></string-name>, <string-name name-style="western"><surname>Sudkamp</surname>, <given-names>J.</given-names></string-name>, <string-name name-style="western"><surname>Nacilla</surname>, <given-names>K.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Bocian</surname>, <given-names>M.</given-names></string-name> (<year>2023</year>). <article-title>Tuning in to a hip-hop beat: Pursuit eye movements reveal processing of biological motion</article-title>. <source>Human Movement Science</source>, <volume>91</volume>, <elocation-id>103126</elocation-id>. <pub-id pub-id-type="doi">10.1016/j.humov.2023.103126</pub-id></mixed-citation></ref>
	
	
	<ref id="bib53.25"><mixed-citation publication-type="web">SR Research Support Forum. (2025a). <italic>EyeLink Development Kit</italic>. SR Research. <ext-link ext-link-type="uri" xlink:href="https://www.sr-research.com/support/forum-3.htm">https://www.sr-research.com/support/forum-3.htm"</ext-link></mixed-citation></ref>
	<ref id="bib53.5"><mixed-citation publication-type="web">SR Research Support Forum. (2025b). <italic>Matlab mex files folder</italic>. Google Code | Archive. <ext-link ext-link-type="uri" xlink:href="https://www.sr-research.com/support/thread-28.html">https://www.sr-research.com/support/thread-28.html</ext-link></mixed-citation></ref>
	
	<ref id="bib54"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Sundstedt</surname>, <given-names>V.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Garro</surname>, <given-names>V.</given-names></string-name> (<year>2022</year>). <article-title>A systematic review of visualization techniques and analysis tools for eye-tracking in 3d environments</article-title>. <source>Frontiers in Neuroergonomics</source>, <volume>3</volume>, <elocation-id>910019</elocation-id>. <pub-id pub-id-type="doi">10.3389/fnrgo.2022.910019</pub-id></mixed-citation></ref>
	<ref id="bib55"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Thomas</surname>, <given-names>L. E.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Seiffert</surname>, <given-names>A. E.</given-names></string-name> (<year>2010</year>). <article-title>Self-motion impairs multiple-object tracking</article-title>. <source>Cognition</source>, <volume>117</volume>(<issue>1</issue>), <fpage>80</fpage>–<lpage>86</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2010.07.002</pub-id></mixed-citation></ref>
	<ref id="bib56"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Vallortigara</surname>, <given-names>G.</given-names></string-name>, <string-name name-style="western"><surname>Regolin</surname>, <given-names>L.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Marconato</surname>, <given-names>F.</given-names></string-name> (<year>2005</year>). <article-title>Visually inexperienced chicks exhibit spontaneous preference for biological motion patterns</article-title>. <source>PLoS Biology</source>, <volume>3</volume>(<issue>7</issue>), <elocation-id>e208</elocation-id>. <pub-id pub-id-type="doi">10.1371/journal.pbio.0030208</pub-id></mixed-citation></ref>
	<ref id="bib57"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Vanrie</surname>, <given-names>J.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Verfaillie</surname>, <given-names>K.</given-names></string-name> (<year>2004</year>). <article-title>Perception of biological motion: A stimulus set of human point-light actions</article-title>. <source>Behavior Research Methods</source>, <volume>36</volume>(<issue>4</issue>), <fpage>625</fpage>–<lpage>629</lpage>. <pub-id pub-id-type="doi">10.3758/BF03206542</pub-id></mixed-citation></ref>
	<ref id="bib58"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Venesvirta</surname>, <given-names>H.</given-names></string-name>, <string-name name-style="western"><surname>Surakka</surname>, <given-names>V.</given-names></string-name>, <string-name name-style="western"><surname>Gizatdinova</surname>, <given-names>Y.</given-names></string-name>, <string-name name-style="western"><surname>Lylykangas</surname>, <given-names>J.</given-names></string-name>, <string-name name-style="western"><surname>Špakov</surname>, <given-names>O.</given-names></string-name>, <string-name name-style="western"><surname>Verho</surname>, <given-names>J.</given-names></string-name>, <string-name name-style="western"><surname>Vetek</surname>, <given-names>A.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Lekkala</surname>, <given-names>J.</given-names></string-name> (<year>2016</year>). <article-title>Emotional reactions to point-light display animations</article-title>. <source>Interacting with Computers</source>, <volume>28</volume>(<issue>4</issue>), <fpage>521</fpage>–<lpage>531</lpage>. <pub-id pub-id-type="doi">10.1093/iwc/iwv028</pub-id></mixed-citation></ref>
	<ref id="bib59"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Villatte</surname>, <given-names>J.</given-names></string-name>, <string-name name-style="western"><surname>Taconnat</surname>, <given-names>L.</given-names></string-name>, <string-name name-style="western"><surname>Bidet-Ildei</surname>, <given-names>C.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Toussaint</surname>, <given-names>L.</given-names></string-name> (<year>2022</year>). <article-title>The role of implicit motor simulation on action verb memory</article-title>. <source>Psychological Research</source>, <volume>87</volume>, <fpage>441</fpage>–<lpage>451</lpage>. <pub-id pub-id-type="doi">10.1007/s00426-022-01671-1</pub-id></mixed-citation></ref>
	<ref id="bib60"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Vogt</surname>, <given-names>S.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Thomaschke</surname>, <given-names>R.</given-names></string-name> (<year>2007</year>). <article-title>From visuo-motor interactions to imitation learning: Behavioural and brain imaging studies</article-title>. <source>Journal of Sports Sciences</source>, <volume>25</volume>(<issue>5</issue>), <fpage>497</fpage>–<lpage>517</lpage>. <pub-id pub-id-type="doi">10.1080/02640410600946779</pub-id></mixed-citation></ref>
	<ref id="bib61"><mixed-citation publication-type="journal"><string-name name-style="western"><surname>Yousefi</surname>, <given-names>B.</given-names></string-name>, &amp; <string-name name-style="western"><surname>Loo</surname>, <given-names>C. K.</given-names></string-name> (<year>2019</year>). <article-title>Biologically-inspired computational neural mechanism for human action/activity recognition: A review</article-title>. <source>Electronics</source>, <volume>8</volume>(<issue>10</issue>), <elocation-id>1169</elocation-id>. <pub-id pub-id-type="doi">10.3390/electronics8101169</pub-id>.</mixed-citation></ref>
</ref-list>
	<sec sec-type="data-availability" id="das"><title>Data Availability</title>
		<p>Data, headlines, code, and additional online materials are openly available on the project’s Open Science Framework page (see <xref ref-type="bibr" rid="bib20.75">Francisco et al., 2025</xref>).</p>
	</sec>	

	<sec sec-type="supplementary-material" id="sp1"><title>Supplementary Materials</title>
		<table-wrap position="anchor">
			<table frame="void" style="background-color:#f3f3f3">
				<col width="60%" align="left"/>
				<col width="40%" align="left"/>
				<thead>
					<tr>
						<th>Type of supplementary materials</th>
						<th>Availability/Access</th>
					</tr>
				</thead>
				<tbody>
					<tr>
						<th colspan="2">Data</th>						
					</tr>
					<tr>
						<td>a. Go2Cam.m.</td>
						<td><xref ref-type="bibr" rid="bib20.75">Francisco et al. (2025)</xref></td>
					</tr>
					<tr>
						<td>b. ICT-Stimulus.c3d.</td>
						<td><xref ref-type="bibr" rid="bib20.75">Francisco et al. (2025)</xref></td>
					</tr>
					<tr style="grey-border-top-dashed">
						<th colspan="2">Code</th>
					</tr>
					<tr>
						<td>Code Stimulus and ICT.</td>
						<td><xref ref-type="bibr" rid="bib20.75">Francisco et al. (2025)</xref></td>
					</tr>										
					<tr style="grey-border-top-dashed">
						<th colspan="2">Material</th>
					</tr>
					<tr>
						<td>a. Generate_ICT_and_Stimulus_Videos_and_c3d.asv.</td>
						<td><xref ref-type="bibr" rid="bib20.75">Francisco et al. (2025)</xref></td>
					</tr>
					<tr>
						<td>b. Generate_ICT_and_Stimulus_Videos_and_c3d.m.</td>
						<td><xref ref-type="bibr" rid="bib20.75">Francisco et al. (2025)</xref></td>
					</tr>
					<tr>					
						<td>c. Experimental C3D and videos.</td>
						<td><xref ref-type="bibr" rid="bib20.75">Francisco et al. (2025)</xref></td>
					</tr>
					<tr style="grey-border-top-dashed">
						<th colspan="2">Study/Analysis preregistration</th>
					</tr>	
					<tr>
						<td>The study was not preregistered.</td>
						<td>&mdash;</td>
					</tr>
					<tr style="grey-border-top-dashed">
						<th colspan="2">Other</th>
					</tr>	
					<tr>
						<td>a. Tutorial.</td>
						<td><xref ref-type="bibr" rid="bib20.75">Francisco et al. (2025)</xref></td>
					</tr>
					<tr>
						<td>b. DOCoMETRe setup files.</td>
						<td><xref ref-type="bibr" rid="bib20.75">Francisco et al. (2025)</xref></td>
					</tr>
				</tbody>
			</table>
		</table-wrap>		
	</sec>
			

<fn-group>
<fn fn-type="financial-disclosure"><p>The authors have no funding to report.</p></fn>
</fn-group>
<fn-group>
<fn fn-type="conflict"><p>The authors have declared that no competing interests exist.</p></fn>
</fn-group>
<ack>
<p>The authors have no additional (i.e., non-financial) support to report.</p>
</ack>
</back>
</article>
