<script src="https://bibbase.org/show?bib=www.cse.yorku.ca/percept/papers/self.bib&proxy=1&group0=year&group1=type&simplegroups=1&folding=1&theme=default&style=https://percept.cse.yorku.ca/cgi-bin/default.css&jsonp=1"></script>
<?php
$contents = file_get_contents("https://bibbase.org/show?bib=www.cse.yorku.ca/percept/papers/self.bib&proxy=1&group0=year&group1=type&simplegroups=1&folding=1&theme=default&style=https://percept.cse.yorku.ca/cgi-bin/default.css");
print_r($contents);
?>
<iframe src="https://bibbase.org/show?bib=www.cse.yorku.ca/percept/papers/self.bib&proxy=1&group0=year&group1=type&simplegroups=1&folding=1&theme=default&style=https://percept.cse.yorku.ca/cgi-bin/default.css"></iframe>
For more details see the documention.
To the site owner:
Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:
@article{Palmisano:rs, abstract = {When we move our head while in virtual reality, the display lag will generate differences in virtual and physical head pose (known as DVP). While DVP are a major trigger for cybersickness, theories differ as to exactly how they constitute a provocative sensory conflict. Here we test two competing theories: the subjective vertical conflict theory and the DVP hypothesis. Thirty-two HMD users made continuous, oscillatory head rotations in either pitch or yaw while viewing a large virtual room. Additional display lag was applied selectively to the simulation about the same, or an orthogonal, axis to the instructed head rotation (generating Yaw-Lag+Yaw-Move, Yaw-Lag+Pitch-Move, Pitch-Lag+Yaw-Move, and Pitch-Lag+Pitch-Move conditions). At the end of each trial: 1) participants rated their sickness severity and scene instability; and 2) their head tracking data were used to estimate DVP throughout the trial. Consistent with our DVP hypothesis, but contrary to subjective vertical conflict theory, Yaw-Lag+Yaw-Move conditions induced significant cybersickness, which was similar in magnitude to that in the Pitch-Lag+Pitch-Move conditions. When extra lag was added along the same axis as the instructed head movement, DVP was found to predict 73 to 76\% of the variance in sickness severity (with measures of the spatial magnitude and the temporal dynamics of the DVP both contributing significantly). Ratings of scene instability were also found to predict sickness severity. Taken together, these findings suggest that: 1) cybersickness can be predicted from objective estimates of the DVP; and 2) provocative stimuli for this sickness can be identified from subjective reports of scene instability.}, author = {Stephen Palmisano and Lance Stephenson and Rodney G Davies and Juno Kim and Robert S Allison}, date-added = {2023-10-07 19:57:43 -0400}, date-modified = {2024-01-21 10:39:23 -0500}, doi = {10.1007/s10055-023-00909-6}, journal = {Virtual Reality}, keywords = {Augmented & Virtual Reality}, number = {22}, pages = {22.1-22.28}, title = {Testing `differences in virtual and physical head pose' and `subjective vertical conflict' accounts of cybersickness}, volume = {28}, year = {2024}, url-1 = {https://doi.org/10.1007/s10055-023-00909-6}}
@article{Palmisano:0aa, annote = {Poitiers, France}, author = {Stephen Palmisano and Robert S. Allison and Rodney G Davies and Peter Wagner and Juno Kim}, date-added = {2023-11-30 07:30:35 -0500}, date-modified = {2024-01-21 10:43:38 -0500}, doi = {10.1080/10447318.2023.2291613}, journal = {International Journal of Human-Computer Interaction}, keywords = {Augmented & Virtual Reality}, title = {Effects of constant and time-varying display lag on DVP and cybersickness when making head-movements in virtual reality}, year = {2023}, url-1 = {https://doi.org/10.1080/10447318.2023.2291613}}
@article{Lee:os, author = {Abigail R. I. Lee and Laurie M. Wilcox and Robert S. Allison}, date-added = {2023-10-05 15:29:53 -0400}, date-modified = {2023-10-05 15:29:53 -0400}, doi = {10.1167/jov.23.12.2}, journal = {Journal of Vision}, keywords = {Depth perception}, number = {12}, pages = {Article 2}, title = {Perceiving depth and motion in depth from successive occlusion}, volume = {23}, year = {2023}, url-1 = {https://doi.org/10.1167/jov.23.12.2}}
@article{Bury:oy, author = {Bury, N. and Jenkin, M. R. M. and Allison, R. S. and Herpers, R. and Harris, L. R.}, date-added = {2023-06-10 06:26:05 -0400}, date-modified = {2023-06-10 06:26:05 -0400}, doi = {10.1038/s41526-023-00282-3}, journal = {NPJ Microgravity}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {42.1-42.10}, title = {Vection underwater illustrates the limitations of neutral buoyancy as a microgravity analog}, volume = {9}, year = {2023}, url-1 = {https://doi.org/10.1038/s41526-023-00282-3}}
@article{palmisano_differences_2022, abstract = {During head-mounted display (HMD)-based virtual reality (VR), head movements and motion-to-photon-based display lag generate differences in our virtual and physical head pose (referred to as DVP). We propose that large-amplitude, time-varying patterns of DVP serve as the primary trigger for cybersickness under such conditions. We test this hypothesis by measuring the sickness and estimating the DVP experienced under different levels of experimentally imposed display lag (ranging from 0 to 222 ms on top of the VR system's 4 ms baseline lag). On each trial, seated participants made continuous, oscillatory head rotations in yaw, pitch or roll while viewing a large virtual room with an Oculus Rift CV1 HMD (head movements were timed to a computer-generated metronome set at either 1.0 or 0.5 Hz). After the experiment, their head-tracking data were used to objectively estimate the DVP during each trial. The mean, peak, and standard deviation of these DVP data were then compared to the participant's cybersickness ratings for that trial. Irrespective of the axis, or the speed, of the participant's head movements, the severity of their cybersickness was found to increase with each of these three DVP summary measures. In line with our DVP hypothesis, cybersickness consistently increased with the amplitude and the variability of our participants' DVP. DVP similarly predicted their conscious experiences during HMD VR---such as the strength of their feelings of spatial presence and their perception of the virtual scene's stability.}, author = {Palmisano, Stephen and Allison, Robert S. and Teixeira, Joel and Kim, Juno}, date-added = {2022-12-19 08:38:20 -0500}, date-modified = {2023-10-07 20:07:42 -0400}, doi = {10.1007/s10055-022-00732-5}, journal = {Virtual Reality}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {2}, title = {Differences in virtual and physical head orientation predict sickness during active head-mounted display-based virtual reality}, volume = {27}, year = {2023}, url-1 = {https://doi.org/10.1007/s10055-022-00732-5}}
@incollection{Bury:2023zl, annote = {Deutscher Luft- und Raumfahrtkongress 2023, https://dlrk2023.dglr.de/}, author = {Bury, N. and Harris, L. R. and Jenkin, M. R. M. and Allison, R. S. and Felsner, S. and Herpers, R.}, booktitle = {Deutscher Luft- und Raumfahrtkongress}, date-added = {2023-11-16 16:00:29 -0500}, date-modified = {2023-11-16 16:02:05 -0500}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {The Effect of Gravity on Human Self-Motion Perception: Implications for Space Mission Safety and Training}, year = {2023}}
@incollection{Teng:2023aa, abstract = {Motion parallax provides information for both absolute distance and relative depth judgments. For a given head motion and given depth interval, the parallactic change is inversely proportional to the square of egocentric distance. In this presentation we will discuss an analysis of a subset of data from a larger study. On each trial, monocularly-viewing observers made left-right swaying head motions at 1.0 Hz to induce the corresponding virtual motion shown on a head mounted display. A gain distortion was applied to the virtual motion, ranging from half to twice the physical motion. While moving, observers adjusted the angle of a vertical fold stimulus presented at distances from 1.3 to 6.0 m so it appeared to be at 90 deg. After the adjustment was made, another virtual environment was presented. While standing stationary, observers matched a pole to the apparent distance of the peak of the previously seen fold. On average observers adjusted the folds to have smaller depth as gain increased or distance decreased. Estimates of target distance also declined with increasing gain. As both distance and gain affect the amount of parallactic change, we analysed to what extent our results could be explained by this variable alone. Our analysis confirmed that both of these measures varied consistently with parallactic change. We will discuss the implications of these findings for depth cue scaling, and for anticipated tolerance to tracking errors in virtual reality systems.}, author = {Teng, X. and Wilcox, L. M. and Allison, R. S.}, booktitle = {Proceedings of the Scottish Vision Group Meeting}, date-added = {2023-08-30 10:56:03 -0400}, date-modified = {2023-08-30 10:57:49 -0400}, keywords = {Stereopsis}, title = {Increasing parallactic change compresses depth and perceived distance}, url = {https://psyresearch.abertay.ac.uk/SVG2023/Abstracts.htm}, year = {2023}, url-1 = {https://psyresearch.abertay.ac.uk/SVG2023/Abstracts.htm}}
@incollection{Guo:2023aa, abstract = {The perception of self-motion can be induced or enhanced by exposure to visual stimuli such as optic flow. It has also been shown that consistent stereoscopic information enhances visually-induced self-motion perception (vection). Conversely, does vection affect the observer's ability to parse the flow? And if so, how does it interact with binocular stereopsis? To investigate, we presented participants with a scene including a target, a fixation cross, floor, ceiling, and pillars to provide optic flow using a wide-field, stereoscopic, immersive display. Participants virtually moved forward or backward at 1.4 m/s, either while continuously viewing the scene to produce vection or when it was only displayed during the 500 ms trial (the no-vection condition). The target was presented initially at eye level, and moved obliquely upward in a sagittal-parallel plane. The target's velocity in depth was adjusted by adaptive staircases to obtain the bias and sensitivity. The task was to indicate whether the target moved obliquely forward or backward in the scene. The stimuli were presented in three viewing conditions: stereoscopic, synoptic, and monocular, to explore the possible interaction between vection and stereoscopic information. While all participants verbally reported that they experienced more vection in the vection condition, the results showed that the bias was slightly but significantly (F(1,127)=5.0217, p<.027) higher with vection (1.279 m/s) than without vection (1.219 m/s). This means vection did not help reduce the flow parsing bias. Furthermore, we did not find any significant interaction effect between vection conditions and viewing conditions.}, author = {Guo, H. and Allison, R. S.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2023-08-30 10:46:37 -0400}, date-modified = {2023-08-30 10:48:05 -0400}, doi = {10.1167/jov.23.9.4721}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {4721}, title = {Vection does not facilitate flow parsing}, volume = {23}, year = {2023}, url-1 = {https://doi.org/10.1167/jov.23.9.4721}}
@incollection{Au:ab, abstract = {Normally we integrate ordinal (occlusion) and metric (e.g., binocular disparity) depth information to obtain a unified percept of 3D layout. Further, quantitative depth must be available to the proprioceptive and motor systems to support interaction with nearby objects. Here we take a step towards understanding how occlusion and binocular disparity combine in the control of visually-guided reaching. We developed a novel conflict paradigm set in a real-world environment in which participants placed a virtual ring around a post positioned at one of several distances (34, 41.5, and 49 cm). The ring was fixed to the index fingertip (with lateral and vertical offsets to avoid finger collisions with the post). If the ring collided with the post, the ring changed colour and the trial restarted. We assessed performance with monocular and binocular viewing using both virtual and physical posts (N=10). The consistency of the occlusion was manipulated such that when the post was physical it never occluded the ring, even when correctly positioned around the post. This resulted in conflicting disparity and occlusion information between the post and the farther portion of the ring. Conversely, in virtual post conditions, occlusion of the ring by the post was consistent. We found that ring placement was less precise when occlusion and disparity information were inconsistent. Participants also required more attempts to complete the task under the conflict compared to the consistent conditions. While this pattern of results was similar for binocular and monocular viewing, observers performed worse and required more attempts when doing the task with one eye. Our results underscore the importance of binocular depth information in performing visuo-motor tasks. However, even when such precise quantitative depth information is available, ordinal depth cues can significantly impact both perception and action, despite these latter cues only providing binary signals about the success of visually-guided action.}, author = {Au, D. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2023-08-30 10:45:14 -0400}, date-modified = {2023-08-30 10:54:28 -0400}, doi = {10.1167/jov.23.9.5154}, keywords = {Stereopsis}, pages = {5154}, title = {Conflicting ordinal depth information interferes with visually-guided reaching}, volume = {23}, year = {2023}, url-1 = {https://doi.org/10.1167/jov.23.9.5154}}
@incollection{Teng:aa, abstract = {When moving about the world, humans rely on visual, proprioceptive and vestibular cues to perceive depth and distance. Normally, these sources of information are consistent. However, what happens if we receive conflicting information about how far we have moved? A previous study reported that at distances of 1.3 to 1.5 m, portrayed binocular 3D shape was not affected by motion gain; however, apparent distance and monocular depth settings were influenced. In our study, we extended the range of distances to 1.5 to 6 m. A VR headset was used to display gain distortions binocularly and monocularly to one eye. Observers swayed from side to side through 20 cm at 0.5 Hz to the beat of a metronome. The simulated virtual motion was varied by a gain of 0.5 to 2.0 times the physical motion. Observers first adjusted a vertical fold until its sides appeared to form a 90-degree angle. The fold then disappeared and they indicated its remembered distance by adjusting the position of a virtual pole. In the monocular condition as gain increased, observers provided increasingly compressed fold depth settings at 1.5 and 3 but not at 6 m. Under binocular viewing, increasing gain compressed distance but not object shape settings. To ensure that the weak binocular effects were not due to failure to perceive the gain, we separately assessed gain discrimination thresholds using the fold stimulus. We found that observers were sensitive to the manipulation over this range and tended to perceive a gain of 1.1 as having no motion distortion under both viewing conditions. It is clear from our data that monocular viewing of kinesthetic/visual mismatch results in significant variations in portrayed depth of the fold. These effects can be somewhat mitigated by increasing viewing distance, but even more so by viewing with both eyes.}, author = {Teng, X. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2023-08-30 10:45:14 -0400}, date-modified = {2023-08-30 10:49:59 -0400}, doi = {10.1167/jov.23.9.5015}, keywords = {Stereopsis}, pages = {5015}, title = {Increasing motion parallax gain compresses space and 3D object shape}, volume = {23}, year = {2023}, url-1 = {https://doi.org/10.1167/jov.23.9.5015}}
@incollection{Bury:2023mb, author = {Nils-Alexander Bury and Laurence R. Harris and Michael Jenkin and Robert S. Allison and Timo Frett and Sandra Felsner and Elef Schellen and Rainer Herpers}, booktitle = {International Multisensory Research Forum}, date-added = {2023-08-13 09:23:14 -0400}, date-modified = {2023-08-13 09:23:14 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {171}, title = {The Illusion of Tilt: Does Your Sex Define Your Perception of Upright?}, year = {2023}}
@incollection{Schellen:2023so, annote = {June 27-30, 2023, Brussels}, author = {Schellen, E. and Ark, E. and Jenkin, M. and Allison, R. S. and Bury, N. and Herpers, R. and Harris, L. R.}, booktitle = {International Multisensory Research Forum}, date-added = {2023-08-13 09:22:27 -0400}, date-modified = {2023-08-13 09:22:27 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {093}, title = {The effect of postural orientation around the pitch axis on the haptic perception of vertical}, year = {2023}}
@incollection{Jorges:yg, abstract = {Gravity influences the perception of size although the mechanism remains unclear. Some authors have suggested that gravity might serve as a reference frame for visual judgements. If so, then in the absence of this persistent frame of reference, size judgements should be less precise in microgravity. Twelve astronauts (6 women and 6 men) were tested before space flight, within 6 days of arrival on the ISS, approximately 90 days after arrival, within 6 days of return to Earth, and more than 60 days after return. They judged the height of a visually fronto-parallel square presented in VR at 6, 12 and 18 m relative to a bar held in their hands aligned with the long axis of the body. The cube's height was varied trial to trial via an adaptive staircase. We found no significant differences in precision or bias between any of the space sessions and before they flew. However, when collapsing across test sessions, astronauts perceived the cube to be significantly larger in space than when upright (p = 0.01) or supine (p = 0.017) on Earth, which was mainly driven by the cube being perceived as smaller (p = 0.002) after having been back on Earth for 60 days compared to their first session. The lack of effect of microgravity on precision makes it unlikely that the gravity-as-reference-frame hypothesis can explain posture-related perceptual size changes observed on Earth. However, space exposure does seem to create lasting changes in perceptual processing.}, annote = {June 27-30, 2023, Brussels}, author = {Jorges, B. and Bury, N. and McManus, M. and Bansal, A. and Allison, R. S. and Jenkin, M. R. M. and Harris, L. R.}, booktitle = {International Multisensory Research Forum}, date-added = {2023-08-13 09:21:09 -0400}, date-modified = {2023-08-13 09:21:09 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {066}, title = {Precision and Bias in the Perception of Object Size in Microgravity}, year = {2023}}
@inproceedings{Wagner:aa, author = {Peter Wagner and Juno Kim and Robert S. Allison and Stephen Palmisano}, booktitle = {SA '23: SIGGRAPH Asia 2023 Posters}, date-added = {2023-11-29 10:55:13 -0500}, date-modified = {2023-11-29 10:55:13 -0500}, doi = {10.1145/3610542.3626139}, keywords = {Augmented & Virtual Reality}, pages = {1--3}, title = {Quantifying display lag and its effects during Head-Mounted Display based Virtual Reality}, volume = {Article 29}, year = {2023}, url-1 = {https://doi.org/10.1145/3610542.3626139}}
@inproceedings{Abadi:it, annote = {Paris, 9-13 October 2023}, author = {Abadi, R. and Wilcox, L. M. and Allison, R. S.}, booktitle = {ICMI '23: 25th ACM International Conference on Multimodal Interaction}, date-added = {2023-08-02 13:15:01 -0400}, date-modified = {2023-10-07 20:24:54 -0400}, doi = {10.1145/3577190.3614107}, keywords = {Augmented & Virtual Reality}, pages = {622--630}, title = {Recreating the Water-Level Task in Augmented Reality}, year = {2023}, url-1 = {https://doi.org/10.1145/3577190.3614107}}
@inproceedings{Shodipe:xe, annote = {Regina, SK, Canada}, author = {Oluwaseyi Elizabeth Shodipe and Robert S. Allison}, booktitle = {2023 IEEE Canadian Conference on Electrical and Computer Engineering (CCECE)}, date-added = {2023-08-02 13:13:46 -0400}, date-modified = {2024-01-21 10:45:28 -0500}, doi = {10.1109/CCECE58730.2023.1028900}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {570-575}, title = {Modelling the relationship between the objective measures of car sickness}, year = {2023}, url-1 = {https://doi.org/10.1109/CCECE58730.2023.1028900}}
@inproceedings{Mohona:wq, author = {Mohona, S.S. and Au, D. and Wilcox, L. M. and Allison, R. S.}, booktitle = {IEEE Workshop on Multimedia Signal Processing}, date-added = {2023-08-02 13:13:06 -0400}, date-modified = {2024-01-21 10:43:09 -0500}, doi = {10.1109/MMSP59012.2023.10337720}, keywords = {Image Quality}, pages = {1-6}, title = {The Subjective Quality of Stereoscopic 3D Video Following Display Stream Compression}, year = {2023}, url-1 = {https://doi.org/10.1109/MMSP59012.2023.10337720}}
@inproceedings{Gregor:2023pz, abstract = {Consistent with the International Maritime Organisation's roadmap for regulating the operation of autonomous surface ships, most concepts of operations for crewed and uncrewed autonomous shipping rely on monitoring and operation from a Remote Control Centre (RCC). The successful execution of such activities requires that operators have adequate Situational Awareness (SA), while avoiding situations of information overload, and the right amount of, or calibrated, Trust in the system. In this study, we examined how operator SA and Trust were affected by different levels of Immersion of the human-machine interface. Simulated RCC interfaces were constructed for a scenario where an autonomous container ship traversed the arctic escorted by robotic aids. SA, Trust, and Motion Sickness (MS) were tracked over time. Different Virtual Reality (VR) technologies were used to represent three levels of Immersion: Non-Immersive VR (NVR), Semi-Immersive VR (SVR), and Immersive VR (IVR). The results illustrated various trade-offs -- with NVR shown to be less taxing, SVR showing several potential benefits for SA, and IVR showing a strong relationship between Trust and SA accuracy, but increased MS. These results suggest that Immersion is an important factor in Situational Awareness and Trust in automation; future research should consider the extent of Immersion, the potential for MS, and the format of delivery (e.g. head-mounted displays versus immersive projection displays). Understanding these trade-offs between levels of Immersion is a requisite step for designing RCCs.}, annote = {Limerick, 5-8 June 2023}, author = {Gregor, A. and Allison, R. S. and Heffner, K.}, booktitle = {IEEE OCEANS Conference}, date-added = {2023-05-18 08:34:24 -0400}, date-modified = {2023-10-14 14:56:29 -0400}, doi = {10.1109/OCEANSLimerick52467.2023.10244249}, keywords = {Augmented & Virtual Reality}, pages = {1-10}, title = {Exploring the Impact of Immersion on Situational Awareness and Trust in Remotely Monitored Maritime Autonomous Surface Ships}, year = {2023}, url-1 = {https://doi.org/10.1109/OCEANSLimerick52467.2023.10244249}}
@inproceedings{Teng:2023uq, abstract = {Virtual reality (VR) is distinguished by the rich, multimodal, immersive sensory information and affordances provided to the user. However, when moving about an immersive virtual world the visual display often conflicts with other sensory cues due to design, the nature of the simulation, or to system limitations (for example impoverished vestibular motion cues during acceleration in racing games). Given that conflicts between sensory cues have been associated with disorientation or discomfort, and theoretically could distort spatial perception, it is important that we understand how and when they are manifested in the user experience. To this end, this set of experiments investigates the impact of mismatch between physical and virtual motion parallax on the perception of the depth of an apparently perpendicular dihedral angle (a fold) and its distance. We applied gain distortions between visual and kinesthetic head motion during lateral sway movements and measured the effect of gain on depth, distance and lateral space compression. We found that under monocular viewing, observers made smaller object depth and distance settings, especially when the gain was greater than 1. Estimates of target distance declined with increasing gain under monocular viewing. Similarly, mean set depth decreased with increasing gain under monocular viewing, except at 6.0 m. The effect of gain was minimal when observers viewed the stimulus binocularly. Further, binocular viewing (stereopsis) improved the precision but not necessarily the accuracy of gain perception. Overall, the lateral compression of space was similar in the stereoscopic and monocular test conditions. Taken together, our results show that the use of large presentation distances (at 6 m) combined with binocular cues to depth and distance enhanced humans' tolerance to visual and kinesthetic mismatch.}, annote = {Shanghai Mar 2023}, author = {Teng, X. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Proceedings IEEE Virtual Reality 2023}, date-added = {2023-05-18 08:34:24 -0400}, date-modified = {2023-05-18 08:35:23 -0400}, doi = {10.1109/VR55154.2023.00055}, keywords = {Stereopsis}, pages = {398-408}, publisher = {IEEE VR}, title = {Manipulation of Motion Parallax Gain Distorts Perceived Distance and Object Depth in Virtual Reality}, year = {2023}, url-1 = {https://doi.org/10.1109/VR55154.2023.00055}}
@article{tong2022modeling, author = {Tong, Jonathan and Wilcox, Laurie M and Allison, Robert S}, date-added = {2022-07-06 11:47:53 -0400}, date-modified = {2022-11-28 10:00:05 -0500}, doi = {10.1109/TVCG.2022.3203098}, journal = {IEEE Transactions on Visualization and Computer Graphics}, keywords = {Augmented & Virtual Reality}, number = {11}, pages = {3759-3766}, title = {The impacts of lens and stereo camera separation on perceived slant in Virtual Reality head-mounted displays}, volume = {28}, year = {2022}, url-1 = {https://doi.org/10.1109/TVCG.2022.3203098}}
@article{Hartle:wl, author = {Hartle, B. and Sudhama-Joseph, A. and Irving, E. L. and Allison, R. S. and Glaholt, M. and Wilcox, L. M.}, date-added = {2022-06-03 14:50:46 -0400}, date-modified = {2022-12-20 09:37:54 -0500}, doi = {10.1167/jov.22.8.6}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {8}, pages = {6.1-6.13}, title = {Shape judgements in natural scenes: Convexity biases vs. stereopsis}, volume = {22}, year = {2022}, url-1 = {https://doi.org/10.1167/jov.22.8.6}}
@incollection{Tong:2022qt, author = {Tong, J. and Wilcox, L. M. and Allison, Robert S}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2023-10-14 15:34:45 -0400}, date-modified = {2023-10-14 15:35:16 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {92}, title = {Stereoscopic Distortions When Viewing Geometry Does Not Match Inter-Pupillary Distance}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Teng:2022nq, author = {Teng, X. and Wilcox, L. M. and Allison, Robert S}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2023-10-14 15:33:50 -0400}, date-modified = {2023-10-14 15:34:21 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {91}, title = {Binocular Depth And Distance Cues Enhance Tolerance To Virtual Motion Gain}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Mohona:2022dd, author = {Mohona, S.S. and Au, D. and Wilcox, L. M. and Allison, Robert S}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2023-10-14 15:32:45 -0400}, date-modified = {2023-10-14 15:33:37 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {77}, title = {Objective And Subjective Impact Of Chromatic Aberration Compensation On Compression Artifacts}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Kio:2022lp, author = {Kio, O. G. and Allison, Robert S}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2023-10-14 15:31:49 -0400}, date-modified = {2023-10-14 15:32:25 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {68}, title = {Simulated Motion In Virtual Environments Affects Cognitive Task Performance}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Keyvanara:2022xz, author = {Keyvanara, M. and Allison, Robert S}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2023-10-14 15:30:16 -0400}, date-modified = {2023-10-14 15:30:55 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {66}, title = {Detectability Of Image Transformations During Eye And Head Movements}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Guo:2022px, author = {Guo, H. and Allison, Robert S}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2023-10-14 15:29:03 -0400}, date-modified = {2023-10-14 15:31:04 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {62}, title = {Effect Of Binocular Disparity On Flow Parsing}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Gregor:2022wu, author = {Gregor, A. and Allison, Robert S and Kio, O. G. and Heffner, K.}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2023-10-14 15:27:54 -0400}, date-modified = {2023-10-14 15:28:47 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {61}, title = {Exploring The Impact Of Immersion On Situational Awareness And Trust In Teleoperated Maritime Autonomous Surface Ship}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Au:2022ye, author = {Au, D. and Tong, J. and Allison, Robert S and Wilcox, L. M.}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2023-10-14 15:26:02 -0400}, date-modified = {2023-10-14 15:27:06 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {50}, title = {Incompatible Occlusion And Binocular Disparity Cause Systematic Localization Errors In Augmented Reality}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Abadi:2022qo, author = {Abadi, R. and Allison, Robert S}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2023-10-14 15:24:36 -0400}, date-modified = {2023-10-14 15:57:28 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {47}, title = {Implementing The Water Level Task In Augmented Reality}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Bury:2022ij, annote = {TeaP 2022 (Tagung experimentell arbeitender Psycholog:innen; Conference of Experimental Psychologists) will take place in Cologne from 20-23 of March 2022}, author = {Bury, N. and Harris, L. R. and Jenkin, M. R. M. and Allison, R. S. and Felsner, S. and Herpers, R.}, booktitle = {TeaP 2022 (64th Tagung experimentell arbeitender Psychologinnen; Conference of Experimental Psychologists)}, date-added = {2023-08-30 11:04:00 -0400}, date-modified = {2023-08-30 11:04:00 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {241-242}, title = {The Influence of Gravity on Perceived Travel Distance in Virtual Reality}, url = {https://teap2022.uni-koeln.de/sites/teap2022/user_upload/TeaP2022_AbstractBooklet.pdf}, year = {2022}, url-1 = {https://teap2022.uni-koeln.de/sites/teap2022/user_upload/TeaP2022_AbstractBooklet.pdf}}
@incollection{Teng:2022hh, abstract = {In natural environments, motion parallax (from visual direction and optic flow) supports both depth and distance perception. What happens if we do not know how far we have moved or receive conflicting information? We manipulated motion gain using a VR headset and a two-phase task to assess perceived depth and distance. Observers first viewed a ``fold'' stimulus, a wall-oriented dihedral angle covered in Voronoi texture. The task was to adjust the dihedral angle until it appeared to be 90 degrees (perpendicular). We occluded the top and bottom edges of the fold and varied the width to make the edges of the fold uninformative. On each trial, following the angle adjustment, a second scene appeared which contained a pole that extended from a ground plane. In this phase, the task was to match the position of the pole to the remembered position of the apex of the previously seen fold. We tested observers binocularly and monocularly in two motion conditions (stationary and moving). When moving, observers swayed laterally through 20 cm in time to a 0.5 Hz metronome; the motion gain varied from 0.5 to 2.0 times the actual self-motion. We found that increased gain caused an increase in the adjusted angle or equivalently a decrease in associated depth of the fold, especially when viewed monocularly. In addition, perceived distance decreased with increasing gain, irrespective of viewing condition. That is, the fold was perceived as smaller and closer when gain was larger than 1. The effect of the gain manipulation was much weaker under binocular viewing. These data show that perceptual distortions due to differences between actual and virtual head motion are compensated for by binocular, and to a lesser extent monocular, depth and distance cues. These flexible compensatory mechanisms make the human visual system highly tolerant of visual/kinesthetic mismatch.}, author = {Teng, X. and Wilcox, L. M. and Allison, R. S.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2022-12-15 18:24:50 -0500}, date-modified = {2022-12-15 18:25:24 -0500}, doi = {10.1167/jov.22.14.3312}, keywords = {Stereopsis}, pages = {3312}, title = {Binocular cues to depth and distance enhance tolerance to visual and kinesthetic mismatch}, volume = {22}, year = {2022}, url-1 = {https://doi.org/10.1167/jov.22.14.3312}}
@incollection{Tong:2022ca, abstract = {The relationship between depth and binocular cues (disparity and convergence) is defined by the distance separating the two eyes, also known as the inter-pupillary distance (IPD). This relationship is mapped in the visual system through experience and feedback, and adaptively recalibrated as IPD gradually increases during development. However, with the advent of stereoscopic-3D displays, situations may arise in which the visual system views content that is captured or rendered with a camera separation that differs from the viewer's own IPD; without feedback, this will likely result in a systematic and persistent misperception of depth. We tested this prediction using a VR headset in which the inter-axial separation of virtual cameras and the separation between the optics are coupled. Observers (n=15) were asked to adjust the angle between two intersecting textured-surfaces until it appeared to be 90$\,^{\circ}$, at each of three viewing distances. In the baseline condition the lens and camera separations matched each observer's IPD. In two `mismatch' conditions (tested in separate blocks) the lens and camera separations were set to the maximum (71 mm) and minimum (59 mm) allowed by the headset. We found that when the lens and camera separation were less than the viewer's IPD they exhibited compression of space; the adjusted angle was smaller than their baseline setting. The reverse pattern was seen when the lens and camera separation were larger than the viewer's IPD. Linear regression analysis supported these conclusions with a significant correlation between the magnitude of IPD mismatch and the deviation of angle adjustment relative to the baseline condition. We show that these results are well explained by a geometric model that considers the scaling of disparity and convergence due to shifts in virtual camera and optical inter-axial separations relative to an observer's IPD.}, author = {Tong, J. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2022-12-15 18:24:50 -0500}, date-modified = {2022-12-15 18:25:27 -0500}, doi = {10.1167/jov.22.14.3564}, keywords = {Stereopsis}, pages = {3564}, title = {Stereoscopic distortions when viewing geometry does not match inter-pupillary distance}, volume = {22}, year = {2022}, url-1 = {https://doi.org/10.1167/jov.22.14.3564}}
@incollection{Guo:2022qc, abstract = {During locomotion, optic flow provides important information for detection, estimation and navigation. On the other hand, binocular disparity, which carries compelling depth information, can potentially aid optic flow parsing. We explored the effect of binocular disparity on observers' ability to detect object motion during simulated locomotion. Twelve participants were recruited and tested in our wide-field stereoscopic environment (WISE). The stimulus consisted of four spherical targets hovering in a pillar hallway, and it was presented in stereoscopic, synoptic (binocular but without disparity), and monocular viewing conditions. In each trial, one of the four targets moved either in depth (approaching or receding) or in a direction parallel to the frontal plane (contracting or expanding). Participants detected the moving target during simulated forward walking locomotion in a 4-alternative forced choice task. The locomotion speed was 1.4 m/s, and therefore the target motion was superimposed upon this optic flow. Adaptive staircases were adopted to obtain the thresholds of the target motion speed in each viewing condition. The results to date showed that participants' thresholds in the stereoscopic condition were 20-40\% lower (better) than those in the synoptic condition when detecting approaching targets, t(7) = 3.85, p = .006, receding targets, t(7) = 2.83, p = .025, and contracting targets, t(7) = 2.57, p = .036. Furthermore, only when detecting expanding targets was threshold performance significantly better in the synoptic condition than in the monocular condition, t(7) = 2.67, p = .032. These results suggest that during locomotion, binocular disparity facilitates optic flow parsing.}, author = {Guo, H. and Allison, R. S.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2022-12-15 18:24:50 -0500}, date-modified = {2022-12-15 18:31:28 -0500}, doi = {10.1167/jov.22.14.3575}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {3575}, title = {Effect of Binocular Disparity on Detecting Target Motion during Locomotion}, volume = {22}, year = {2022}, url-1 = {https://doi.org/10.1167/jov.22.14.3575}}
@incollection{Kio:2022bx, abstract = {Compelling simulated motion in virtual environments can induce the sensation of self motion (or vection) in stationary observers. While the usefulness and functional significance of vection is still debated, the literature has shown that perceived magnitude of vection is lower when observers perform attentionally demanding cognitive tasks than when attentional demands are absent. Could simulated motion and the resulting vection experienced in virtual environments in turn affect how observers perform various attention demanding tasks? In this study, therefore, we investigated how accurately and rapidly observers could perform attention-demanding aural and visual tasks while experiencing different levels of vection-inducing motion in a virtual environment. Seventeen adult observers were exposed to different levels of simulated motion at virtual camera speeds of 0 (stationary), 5, 10 and 15 m/s in a straight virtual corridor rendered through a Vive-Pro Virtual Reality headset. During these simulations, they performed aural or visual discrimination tasks, or no task at all. We recorded the accuracy, the time observers took to respond to each task, and the intensity of vection they reported. Repeated Measures ANOVA showed that levels of simulated motion did not significantly affect accuracy on either task (F(3,48) = 1.469, p = .235 aural; F(3,48) = 1.504, p = .226 visual), but significantly affected the response times on aural tasks (F(3,48) = 4.320, p = .009 aural; F(3,48) = 0.916, p = .440 visual). Observers generally perceived less vection at all levels of motion when they performed visual discrimination tasks compared to when they had no task to perform (F(2,32) = 13.784, p = .038). This suggests that perceived intensities of vection are significantly reduced when people perform attentionally demanding tasks related to visual processing. Conversely, vection intensity or simulated motion speed can affect performance on aural tasks.}, author = {Kio, O. G. and Allison, R. S.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2022-12-15 18:24:50 -0500}, date-modified = {2022-12-15 18:25:21 -0500}, doi = {10.1167/jov.22.14.3627}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {3627}, title = {Effects of simulated and perceived motion on cognitive task performance}, volume = {22}, year = {2022}, url-1 = {https://doi.org/10.1167/jov.22.14.3627}}
@incollection{Au:2022ng, abstract = {Under natural viewing conditions binocular disparity can provide metric depth information; many of the monocular depth cues, such as occlusion, provide depth order only. Nonetheless, when put in conflict there is evidence that occlusion can influence the direction and magnitude of perceived depth from stereopsis. Here we explored the integration of depth information from occlusion and binocular disparity in complex real-world environments using a depth matching paradigm. The virtual stimulus was a green letter `A' presented using a Microsoft HoloLens augmented reality (AR) display and superimposed on a real frontoparallel surface at 1.2 m. The letter was placed at one of eight positions -- between 0.9 and 1.6 m, including the surface location. Observers matched the distance of a probe to the perceived distance of the letter by moving it with a sliding pole. For comparison, observers performed the same task without the physical surface. Our results show that when the surface was absent or the letter was rendered in front of the surface the letter was accurately localized. However, when the letter was rendered beyond the surface, observers progressively underestimated the letter's distance, even though the relative disparity between the probe and the target should have been equally informative at all locations. This pattern of results suggests that 1) observers are unable to ignore conflicts between occlusion and binocular disparity and 2) the occlusion conflict biases the perceived position of the target in the direction of the occluder. Our results are well modelled using a Bayesian ideal observer with an asymmetric likelihood for an occlusion cue representing letter positions in front of vs beyond the surface. In addition to providing insight into the integration of ordinal and metric depth information, these results speak to the impact of such errors in AR on user interactions. }, author = {Au, D. and Tong, J. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2022-12-15 18:24:50 -0500}, date-modified = {2022-12-15 18:25:17 -0500}, doi = {10.1167/jov.22.14.3739}, keywords = {Stereopsis}, pages = {3739}, title = {The impact of conflicting ordinal and metric depth information on depth matching}, volume = {22}, year = {2022}, url-1 = {https://doi.org/10.1167/jov.22.14.3739}}
@incollection{Kuo:2022fb, abstract = {Walking interfaces for Virtual Reality often produce proprioceptive, vestibular and somatosensory signals which conflict with the visual presentation of terrain conditions in virtual environments. We compared locomotion decisions made using a dual joystick gamepad with a walking-in-place metaphor. Each trial presented two choices where the visual path condition differed in one of the following aspects: (a) incline, (b) friction, (c) texture, and (d) width. Users chose one of these paths by using the locomotion interface to walk to a goal. Their decisions were recorded and analyzed as a generalized linear mixed model. The results suggest that the walking-in-place interface produces choices of visual conditions that more often reflect expectations of walking in the real world: decisions that minimize energy expended or risk of injury. Because of this, we can infer that different walking interfaces can produce different results in virtual reality experiments. Therefore, behavioral scientists should be wary that sensory discrepancies between visual presentation and other modalities can negatively affect the ecological validity of studies using virtual reality. Consideration should be taken designing these studies to ensure that sensory inputs are as natural and consistent between modalities as possible.}, author = {Kuo, C. and Allison, R. S.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2022-12-15 18:24:50 -0500}, date-modified = {2022-12-15 18:25:34 -0500}, doi = {10.1167/jov.22.14.3826}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {3826}, title = {Locomotor decision-making altered by different walking interfaces in virtual reality}, volume = {22}, year = {2022}, url-1 = {https://doi.org/10.1167/jov.22.14.3826}}
@incollection{Bansal:2022ux, abstract = {One of the most common, and most complex, functions of the human brain is to perceive our own motion. Estimating how far we have travelled is a multisensory process, although the relative contributions of our different sensory systems to estimating travel distance are still unknown. Testing astronauts in microgravity not only allows us to parse out the contributions from the different senses more easily, but it can also inform mission planners and trainers about how our perception of travel distance might change in microgravity. Using VR, we tested astronauts' (n=12, 6 female) perceived travel distance 5 times: once before their flight, twice in space (upon arrival and 3 months after), and twice again after they returned to Earth (upon reentry and 2 months after). Preliminary results show no differences between the astronauts' estimations of travel distance after arriving at the ISS, after 3 months in space, or when they returned to Earth. These findings not only provide insights into the sensory contributions involved in making travel distance estimates, but also indicate that there is no adverse effect of long-duration exposure to microgravity on perceived travel distance.}, annote = {The symposium will start early morning on November 17, 2022, and end late afternoon on November 18, 2022 in Calgary}, author = {Bansal, A. T. and Jorges, B. and Bury, N. and McManus, M. and Allison, R. S. and Jenkin, M. R. M. and Harris, L. R.}, booktitle = {2022 Scientific Abstracts: The First Canadian Space Health Research Symposium}, date-added = {2022-11-30 13:51:32 -0500}, date-modified = {2022-11-30 13:51:32 -0500}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {1}, title = {The perception of travel distance in microgravity}, year = {2022}}
@incollection{Jorges:2022ux, abstract = {Exposure to microgravity can influence the visual perception of object size; however, the mechanism remains an object of debate. Gravity might serve as a reference frame in which visual information is interpreted. The absence of gravity should then make size judgements more variable due to the inability to anchor these judgements. We tested this hypothesis by assessing the accuracy and variability of astronauts' size judgements before, during, and after a six-month or longer microgravity exposure in orbit. Twelve astronauts were tested before take-off, within 7 days of arrival on the ISS, around 90 days after arrival, within 7 days of return to Earth and at least 60 days after return. We found that variability was, indeed, higher upon arrival on the ISS (p = 0.03), but not later during space flight. Further, astronauts but not control participants -- surprisingly -- perceived the object to be significantly smaller (p = 0.04) at their last test session than at their first session, suggesting lasting changes in their perception. Overall, our data provide additional support that gravity may indeed serve as a reference frame in which visual input is interpreted for size judgements.}, annote = {The symposium will start early morning on November 17, 2022, and end late afternoon on November 18, 2022 in Calgary}, author = {Jorges, B. and Bury, N. and McManus, M. and Bansal, A. and Allison, R. S. and Jenkin, M. R. M. and Harris, L. R.}, booktitle = {2022 Scientific Abstracts: The First Canadian Space Health Research Symposium}, date-added = {2022-11-30 13:51:32 -0500}, date-modified = {2022-11-30 13:51:32 -0500}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {8}, title = {The perception of object size in microgravity}, year = {2022}}
@incollection{bury2022earth, author = {Bury, Nils-Alexander and Harris, Laurence R and Jenkin, Michael and Allison, Robert S and Felsner, Sandra and Herpers, Rainer}, booktitle = {From Picture to Reality, from Observer to Agent. Vision Research Conference, Second Student Centre, York University, June 6-9, 2022}, date-added = {2022-07-06 11:47:53 -0400}, date-modified = {2023-10-14 15:24:05 -0400}, doi = {10.25071/10315/39491}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {53}, title = {From Earth to Space: the Effect of Gravity and Sex on Self-Motion Perception}, year = {2022}, url-1 = {https://doi.org/10.25071/10315/39491}}
@incollection{Weinberg:pb, abstract = {Background and Aim: The ability to control and maintain an upright standing posture is crucial for humans interacting with their environment. Many factors, such as a fear of falling as observed when exposed to a postural threat [1], can cause changes in postural stability. The ability to quantify changes in postural stability is critical to understand psychological (and physiological) effects on balance. Therefore, the goal of the study is to use linear and nonlinear analyses to identify the effects of vision and postural threat on upright stance. Methods: This study involves re-examining the dataset previously reported [2]. This secondary analysis was conducted as the initial analysis did not examine the sway temporal dynamics. Twenty young healthy adults stood on a force plate mounted to a hydraulic lift at two height conditions, 0.8 m (LOW) and 3.2 m (HIGH). Both height conditions were performed with both eyes open (EO) and closed (EC). Participants stood quietly for 60 seconds on a force plate, and centre of pressure (COP) was calculated from ground reaction forces and moments. For the linear analyses, anterior-posterior COP root mean square (RMS) and mean power frequency (MPF) were calculated. For the nonlinear analysis, recurrence plots were generated from the COP data. These plots provided a visualization of the timepoints at which the trajectory returns to a location it has visited before. A recurrence quantification analysis (RQA) was then used to quantify the number and duration of recurrences. RQA measures included recurrence rate, determinism, entropy, and average diagonal line length. Results: For the linear analyses, COP RMS showed no effect of vision or of a vision-height interaction; however, a main effect of height was observed, with sway amplitude decreasing in the HIGH compared to LOW condition. For COP MPF, main effects were found for both height and vision, with frequency increasing in the HIGH compared to LOW condition, as well as increasing in EC compared to EO. For the nonlinear analysis, there was a main effect of both vision and height, with all RQA measures decreasing in the HIGH compared to LOW condition, and decreasing in EC compared to EO. Conclusions: Both linear and nonlinear analyses revealed differences across height and visual conditions. When standing at height, a decrease in amplitude and an increase in frequency were observed, thought to resemble a stiffening strategy [1]. The decreases in RQA measures across height and visual conditions may provide additional evidence for a change in postural strategy. These changes observed across conditions might suggest that participants deliberately tried to minimize their sway magnitude, resulting in higher frequency and less predictable sway patterns. Given that the nonlinear analysis identifies changes across visual (and height) conditions, this study shows a need to go beyond traditional linear measures when assessing balance. Nonlinear measures can enhance our understanding of postural stability and should be used in future analyses, with the potential to identify changes that linear measures may not detect. References: [1] Carpenter et al., Exp Brain Res, 2001; [2] Cleworth & Carpenter, Neurosci Lett, 2016. Acknowledgements: Funded by VISTA and NSERC}, annote = {ISPGR World Congress 2022, July 3--7, Montreal, Canada, P2-X-153, https://ispgr.org/wp-content/uploads/2022/06/ISPGR_Abstracts_June21.pdf}, author = {Sara Weinberg and Stephen Palmisano and Robert S. Allison and Taylor Cleworth}, booktitle = {International Society of Posture and Gait Research (ISPGR) World Congress 2022}, date-added = {2022-07-04 07:35:24 -0400}, date-modified = {2022-07-04 07:35:24 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {381-382}, title = {Nonlinear analysis of the effects of vision and postural threat on upright stance}, year = {2022}}
@incollection{tong_modeling_2022, abstract = {Projective geometry predicts that a mismatch between user interpupillary-distance (IPD) and the inter-axial separation of stereo cameras used to render imagery in VR will result in distortions of perceived scale. A potentially important, but often overlooked, consequence of a mismatch between user IPD and VR lens separation is the impact on binocular convergence. Here we describe a geometric model that incorporates shifts in binocular convergence due to the prismatic effect of decentered lenses, as well as the offset of dual displays relative to the eyes, and predicts biases in perceived slant. The model predicts that when the inter-lens and inter-display separation is less than an observer's IPD, perceived slant will be biased towards frontoparallel. Conversely, when the inter-lens and inter-display separation is greater than an observer's IPD, perceived slant will be increased. These predictions were tested and confirmed in a VR headset with adjustable inter-lens and display separation (both coupled). In the experiment, observers completed a fold adjustment task in which they adjusted the angle between two intersecting, textured surfaces until they appeared to be perpendicular to one another. The task was performed at three randomly interleaved viewing distances, monocularly and binocularly. In separate blocks, the inter-lens and display separation was either matched to the observer's IPD (baseline condition) or set to the minimum or maximum allowed by the headset (IPD-mismatch conditions). When the inter-lens and display separation was less than the observers' IPD they underestimated surface slant relative to baseline, and the reverse pattern was seen when the inter-lens and display separation was greater than their IPD. Overall, the geometric model tended to overestimate the effect of IPD-mismatch on perceived slant, especially at the farther viewing distances. We extended the model to incorporate the relative weighting of monocular and binocular cues, resulting in an overall improvement in the model fits. Our model provides researchers and VR systems designers with a means of predicting depth perception when the optics of head-mounted displays may not be aligned with users' eyes.}, author = {Tong, Jonathan and Wilcox, Laurie and Allison, Robert}, booktitle = {{MODVIS} {Workshop}}, date-added = {2022-05-12 13:58:12 -0400}, date-modified = {2022-07-04 07:34:41 -0400}, keywords = {Augmented & Virtual Reality}, month = 05, title = {Modeling the impacts of inter-display and inter-lens separation on perceived slant in {Virtual} {Reality} {Head}-mounted displays}, url = {https://docs.lib.purdue.edu/modvis/2022/session02/2}, year = {2022}, url-1 = {https://docs.lib.purdue.edu/modvis/2022/session02/2}}
@inproceedings{mohona202275, author = {Mohona, Sanjida Sharmin and Au, Domenic and Wilcox, Laurie M and Allison, Robert S}, booktitle = {SID Symposium Digest of Technical Papers}, date-added = {2022-07-06 11:47:53 -0400}, date-modified = {2022-07-06 11:47:53 -0400}, keywords = {Image Quality}, number = {1}, pages = {1013--1016}, title = {75-2: The Effect of Chromatic Aberration Correction on Visually Lossless Compression}, volume = {53}, year = {2022}}
@article{Allison:2021vq, author = {Allison, R. S. and Johnston, J. M. and Wooster, M.}, date-added = {2021-08-04 22:03:17 -0400}, date-modified = {2021-09-07 10:36:18 -0400}, doi = {10.3390/s21165402}, journal = {Sensors}, keywords = {Misc.}, number = {16}, pages = {5402.1-5402.3}, title = {Sensors for Fire and Smoke Monitoring}, volume = {21}, year = {2021}, url-1 = {https://doi.org/10.3390/s21165402}}
@article{Mohona:aa, abstract = {High-resolution display bandwidth requirements often now exceed the capacity of display link channels necessitating compression. The goal of visually lossless compression codecs such as VESA DSC 1.2 is that viewers perceive no difference between the compressed and uncompressed images, maintaining long-standing expectations of a lossless display link. Such low impairment performance is difficult to validate as artifacts are at or below sensory threshold. We have developed a 3D version of the ISO/IEC 29170-2 flicker paradigm and used it to compare the effects of image compression in flat images presented in the plane of the screen (2D) to compression in flat images with a disparity offset from the screen (3D). We hypothesized that differences in the location and size of the compression errors between the disparate images in the 3D case would affect their visibility. The results showed that artifacts were often less visible in 3D compared to 2D viewing. These findings have practical applications with respect to codec performance targets and algorithm development for 3D movie, animation, and virtual reality content. In particular, higher compression should be attainable in stereoscopic compared to equivalent 2D images because of increased tolerance to artifacts that are binocularly unmatched or have disparity relative to the screen.}, author = {Mohona, S.S. and Wilcox, L. M. and Allison, R. S.}, date-added = {2021-02-18 14:16:38 -0500}, date-modified = {2021-08-02 16:48:34 -0400}, doi = {10.1002/jsid.1002}, journal = {Journal of the Society for Information Display}, keywords = {Stereopsis}, number = {8}, pages = {591-607}, title = {Subjective Assessment of Display Stream Compression for Stereoscopic Imagery}, volume = {29}, year = {2021}, url-1 = {https://doi.org/10.1002/jsid.1002}}
@article{Allison:jh, author = {Allison, R. S. and Wilcox, L. M.}, date-added = {2020-10-14 15:39:01 -0400}, date-modified = {2020-11-05 13:21:05 -0500}, doi = {10.1016/j.visres.2020.10.003}, journal = {Vision Research}, keywords = {Stereopsis}, pages = {70-78}, title = {Stereoscopic depth constancy from a different direction}, volume = {178}, year = {2021}, url-1 = {https://doi.org/10.1016/j.visres.2020.10.003}}
@article{Allison:2020rc, author = {Allison, R.S. and Fujii, Y. and Wilcox, L. M.}, date-added = {2020-09-14 16:17:34 -0400}, date-modified = {2021-06-04 17:30:49 -0400}, doi = {10.1109/TBC.2020.3028276}, journal = {IEEE Transactions on Broadcasting}, keywords = {Image Quality}, number = {2}, pages = {360-371}, title = {Effects of motion picture frame rate on material and texture appearance}, volume = {67}, year = {2021}, url-1 = {https://doi.org/10.1109/TBC.2020.3028276}}
@article{Zhao:fx, abstract = {Stereopsis has been shown to aid activities related to hand-eye coordination but it is less clear that stereopsis provides advantages in locomotion activities, such as walking and running, as steady viewing is needed to let stereopsis achieve maximum precision. While previous research has shown that stereopsis also helps people to make more accurate lower limb movements, these studies were conducted in setups with limited walking distances that did not represent typical walking scenarios in our everyday life --- we usually walk continuously over longer distances. Thus, it is still uncertain whether stereopsis helps people to make more accurate movements under constant motion during continuous walking. In the present study, we conducted two walking experiments in virtual environments using a linear treadmill and a novel projected display known as the Wide Immersive Stereo Environment (WISE) to study the role of stereopsis in continuous walking. The first experiment investigated the walking performance of people stepping over obstacles while the second experiment focused on a scenario on stepping over gaps. Both experiments were conducted under both stereoscopic viewing and non-stereoscopic viewing conditions. By analyzing the gait parameters, we found that stereopsis helped people to make more accurate movements to step over obstacles and gaps in continuous walking.}, author = {Zhao, J. and Allison, R. S.}, date-added = {2020-01-24 21:51:34 -0500}, date-modified = {2023-10-27 11:07:34 -0400}, doi = {10.1109/TVCG.2020.2969181}, journal = {IEEE Transactions on Visualization and Computer Graphics}, keywords = {Stereopsis}, number = {7}, pages = {3277-3288}, title = {The Role of Binocular Vision in Avoiding Virtual Obstacles While Walking}, url = {https://percept.eecs.yorku.ca/papers/zhao tvgc 2020 preprint.pdf}, volume = {27}, year = {2021}, url-1 = {https://doi.org/10.1109/TVCG.2020.2969181}}
@book{allisonbook:2021la, address = {Basel, Switzerland}, date-added = {2021-09-17 10:52:42 -0400}, date-modified = {2022-07-04 07:34:53 -0400}, editor = {Allison, R. S. and Johnston, J. M. and Wooster, M.}, keywords = {Misc.}, publisher = {MDPI}, title = {Sensors for Fire and Smoke Monitoring}, year = {2021}}
@incollection{Harris:2021bh, abstract = {Moving around in a zero-gravity environment is very different from moving on Earth. The vestibular system in 0g registers only the accelerations associated with movement and no longer has to distinguish them from the acceleration of gravity. How does this affect an astronaut's perception of space and movement? Here we explore how the perception of self-motion and distance changes during and following long-duration exposure to 0g. Our hypothesis was that absence of gravity cues should lead participants to rely more strongly on visual information in 0g compared to on Earth. We tested a cohort of ISS astronauts five times: before flight, twice during flight (within 6 days of arrival in space and after 3 months in 0g) and twice after flight (within 6 days of re-entry and 2 months after returning). Data collection is on-going, but we have currently tested 8 out of 10 participants. Using Virtual Reality, astronauts performed two tasks. Task 1, the perception of self-motion task, measures how much visual motion is required to create the sensation of moving through a particular distance. Astronauts viewed a target at one of several distances in front of them in a virtual corridor. The target then disappeared, and they experienced visually simulated self-motion along the corridor and pressed a button to indicate when they had reached the position of the remembered target. Task 2 was the perception of distance task. We presented a virtual cube in the same corridor and asked the astronauts to judge whether the cube's sides were longer or shorter than a reference length they held in their hands. We inferred the distance at which they perceived the target from the size that they chose to match the reference length. Preliminary analysis of the results with Linear Mixed-Effects Modelling suggests that participants did not experience any differences in perceived self-motion on first arriving in space (p = 0.783). After being in space for three months, however, they needed significantly more visual motion (7.5\%) to create the impression they had passed through the target distance (p < 0.001), indicating that visual motion (optic flow) elicited a weaker sense of self-motion than before adapting to the space environment. Astronauts also made size matches that were consistent with underestimating perceived distance in space (on arrival: 26.6\% closer, p < 0.001; after 3 months: 26.3\% closer, p < 0.001) compared to the pre-test on Earth. Our results indicate that prolonged exposure to 0g tends to decrease the effective use of visual information for the perception of travelled distance. This effect cannot be explained in terms of biased distance perception. Knowing that astronauts are likely to misperceive their self-motion and the scale of their environment is critical information for the design of safe operations in space and for readjustment to other gravity levels found on the Moon and Mars. We acknowledge the generous support of the Canadian Space Agency (15ILSRA1-York).}, address = {Moscow, Russia}, annote = {23rd IAA Humans in Space Symposium, 05-08 April 2021}, author = {Harris, L. R. and Jorges, B and Bury, N. and McManus, M and Allison, R. S. and Jenkin, M}, booktitle = {IAA Humans in Space Conference}, date-added = {2023-03-21 17:32:59 -0400}, date-modified = {2023-03-21 17:32:59 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {04}, title = {The Perception of Self-Motion in Microgravity}, url = {https://iaaspace.org/event/23rd-iaa-humans-in-space-symposium-2021/}, year = {2021}, url-1 = {https://iaaspace.org/event/23rd-iaa-humans-in-space-symposium-2021/}}
@incollection{Mohona:tb, abstract = {In virtual and augmented reality displays, lenses focus the near-eye display at a far optical distance and produce a large field of view to immerse the user. These lenses typically exhibit considerable distortion and cause chromatic aberration. These are not apparent to the user because they are typically corrected by pre-processing the image with the opposite distortion before sending it to the display. Such pre-processing involves pre-warping source images with inverse pin-cushion (barrel) distortion to correct for the pin-cushion transform from the display optics, with different correction for each colour channel. Most image compression algorithms use a colour space conversion before compression, which normally improves compression performance by reducing the degree of correlation between components. However, as lens pre-distortion processing is colour specific, the spatial correlation between colour channels is disrupted by this processing; objective analyses suggest that the colour space conversion may not be beneficial under these conditions. Here we used the ISO/IEC 29170-2 flicker protocol, adapted for 3D imagery, to evaluate the sensitivity of two state-of-the-art display stream compression algorithms to characteristic distortions resulting from stereoscopic head-mounted display pre-processing which either included normal colour transformations or bypassed them. A set of 10 computer-generated stereoscopic high dynamic range images was tested. Images spanned a wide range of content and were designed to challenge the codecs. The pre-processing workflow involved pre-warping the images, compressing with each codec, and finally de-warping with pin-cushion distortion. De-warping was applied to simulate the distortion from magnifying lenses as all images were viewed on a mirror stereoscope without such lenses. The main image manipulations were the codec used, the compression levels and whether the colour transform was bypassed (bypass-on) or not (bypass-off). Images were compressed at each codec's nominal production level and at each image's estimated limit of visually lossless compression. Sixty observers were tested in three groups of 10 for each codec. Overall, we found little sensitivity to these distortions and our results confirmed that bypassing colour transforms in the codec can be significantly beneficial for some images.}, address = {Toronto, Canada}, annote = {June 14 -- 17, 2021}, author = {Sanjida Sharmin Mohona and Domenic Au and Yuqian Hou and Onoise Gerald Kio and James Goel and Natan Jacobson and Robert S. Allison and Laurie M. Wilcox}, booktitle = {CVR/VISTA Virtual Vision Futures Conference}, date-added = {2021-09-06 09:34:40 -0400}, date-modified = {2021-09-07 10:37:07 -0400}, keywords = {Image Quality}, month = {06}, pages = {48}, title = {Effects of Chromatic Aberration Compensation on Visibility of Compression Artifacts}, url = {https://www.yorku.ca/cvr/wp-content/uploads/sites/90/2021/06/VVF-program-updated.pdf}, year = {2021}, url-1 = {https://www.yorku.ca/cvr/wp-content/uploads/sites/90/2021/06/VVF-program-updated.pdf}}
@incollection{Jorges:mz, abstract = {Perceiving one's self-motion is a multisensory process involving integrating visual, vestibular and other cues. The perception of self-motion can be elicited by visual cues alone (vection) in a stationary observer. In this case, optic flow information compatible with self-motion may be affected by conflicting vestibular cues signaling that the body is not accelerating. Since vestibular cues are less reliable when lying down (Fernandez \& Goldberg, 1976), conflicting vestibular cues might bias the self-motion percept less when lying down than when upright. To test this hypothesis, we immersed 20 participants in a virtual reality hallway environment and presented targets at different distances ahead of them. The targets then disappeared, and participants experienced optic flow simulating constant-acceleration, straight-ahead self-motion. They indicated by a button press when they felt they had reached the position of the previously-viewed target. Participants also performed a task that assessed biases in distance perception. We showed them virtual boxes at different simulated distances. On each trial, they judged if the height of the box was bigger or smaller than a reference ruler held in their hands. Perceived distance can be inferred from biases in perceived size. They performed both tasks sitting upright and lying supine. Participants needed less optic flow (perceived they had travelled further) to perceive they had reached the target's position when supine than when sitting (by 4.8\%, bootstrapped 95\% CI=[3.5\%;6.4\%], determined using Linear Mixed Modelling). Participants also judged objects as larger (compatible with closer) when upright than when supine (by 2.5\%, 95\% CI=[0.03\%;4.6\%], as above). The bias in traveled distance thus cannot be reduced to a bias in perceived distance. These results suggest that vestibular cues impact self-motion distance perception, as they do heading judgements (MacNeilage, Banks, DeAngelis \& Angelaki, 2010), even when the task could be solved with visual cues alone.}, author = {Jorges, B and Bury, N. and McManus, M and Allison, R. S. and Jenkin, M and Harris, L. R.}, booktitle = {Journal of Vision (Vision Sciences Society Abstracts)}, date-added = {2021-09-06 09:10:13 -0400}, date-modified = {2021-09-11 22:21:25 -0400}, doi = {10.1167/jov.21.9.2301}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {2301}, title = {Body posture affects the perception of visually simulated self-motion}, volume = {21}, year = {2021}, url-1 = {https://doi.org/10.1167/jov.21.9.2301}}
@incollection{Teng:2021ty, abstract = {Humans use visual, vestibular, kinesthetic and other cues to effectively navigate through the world. Therefore, conflict between these sources of information has potentially significant implications for human perception of geometric layout. Previous work has found that introducing gain differences between physical and virtual head movement had little effect on distance perception. However, motion parallax is known to be a potent cue to relative depth. In the present study, we explore the impact of conflict between physical and portrayed self-motion on perception of object shape. To do so we varied the gain between virtual and physical head motion (ranging from a factor of 0.5 to 2) and measured the effect on depth perception. Observers viewed a `fold' stimulus, a convex dihedral angle formed by two irregularly-textured, wall-oriented planes connected at a common vertical edge. Stimuli were rendered and presented using head-mounted displays (Oculus Rift S or Quest in Rift S emulation mode). On each trial, observers adjusted the angle of the fold until the two joined planes appeared perpendicular. To assess the role of stereopsis we tested binocularly and monocularly. To introduce motion parallax, observers swayed laterally through a distance of 30 cm at 0.5 Hz timed to a metronome beat; this motion was multiplied by the gain to produce the virtual viewpoint. Our results showed that gain had little effect on depth perception in the binocular test conditions. Using a model incorporating self and object motion, we computed predicted perceived depths based on the adjusted angles and then compared these with each observer's input. The modelled outcomes were very consistent across visual manipulations, suggesting that observers have remarkably accurate perception of object motion under these conditions. Additional analyses predict corresponding variations in distance perception and we will test these hypotheses in future experiments.}, author = {Teng, X. and Wilcox, L. M. and Allison, R. S.}, booktitle = {Journal of Vision (Vision Sciences Society Abstracts)}, date-added = {2021-09-06 09:10:13 -0400}, date-modified = {2021-09-06 09:10:13 -0400}, doi = {10.1167/jov.21.9.2035}, keywords = {Stereopsis}, pages = {2035}, title = {Interpretation of Depth from Scaled Motion Parallax in Virtual Reality}, volume = {21}, year = {2021}, url-1 = {https://doi.org/10.1167/jov.21.9.2035}}
@incollection{Tong:2021wf, abstract = {While much is known about our perception of surface slant for planar surfaces, less attention has been paid to our ability to estimate the average slant of curved surfaces. The average slant across a surface with symmetric curvature (a parabolic surface) and globally slanted about its axis of symmetry is equivalent to that of a planar surface slanted by the same degree. Therefore, if symmetrically curved surfaces are perceived accurately, observers' estimates of their average surface slant should be the same as for an equivalently slanted planar surface. Here we evaluated this prediction using a 2-alternative forced choice slant discrimination task. Observers (n=10) viewed a standard 15$\,^{\circ}$ (top-away) slanted planar surface and a comparison surface that varied in slant between 7.5$\,^{\circ}$ and 22.5$\,^{\circ}$; both were presented stereoscopically and textured with a Voronoi pattern. In separate conditions, the comparison surface was either planar, or parabolically curved (peak displacement = 0.15m) about its axis of rotation in a concave or convex direction. Observers consistently underestimated the average slant of the concave comparison surface relative to the planar surface. This bias is predicted by the effect of curvature modulating the degree of foreshortening in the perspective projection of a slanted surface. Perspective projection also predicts overestimation of average slant in convex surfaces, however we found no such bias. We propose that imprecision in the estimation of average slant in curved surfaces, relative to planar surfaces, makes them more susceptible to the commonly reported frontoparallel bias (slant underestimation). This bias may counteract the predicted overestimation of average slant in convex surfaces. Taken together, our modelling and psychophysical results indicate that curvature modulates the pattern of foreshortening of globally slanted surfaces, which biases the estimation of average slant. This, in turn, may lead to systematic errors in our interaction with curved surfaces. }, author = {Tong, J. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Journal of Vision (Vision Sciences Society Abstracts)}, date-added = {2021-09-06 09:10:13 -0400}, date-modified = {2021-09-07 10:36:38 -0400}, doi = {10.1167/jov.21.9.2011}, keywords = {Stereopsis}, pages = {2011}, title = {The perception of average slant is biased in concave surfaces}, volume = {21}, year = {2021}, url-1 = {https://doi.org/10.1167/jov.21.9.2011}}
@incollection{Palmisano:2021sp, abstract = {When we rotate our heads during head-mounted display (HMD) based virtual reality (VR), our virtual head tends to trail its true orientation (due to display lag). However, the exact differences in our virtual and physical head pose (DVP) vary throughout the movement. We recently proposed that large amplitude, time-varying patterns of DVP were the primary trigger for cybersickness in active HMD VR. This study tests the DVP hypothesis by measuring the sickness, and estimating the DVP, produced by head rotations under different levels of imposed display lag (from 0 to 200 ms). On each trial, users made continuous, oscillatory head movements in either yaw, pitch or roll while seated inside a large virtual room. Afterwards, we used the level of imposed display lag for the condition, and the user's own tracked head-motion data, to estimate their DVP time series for each trial. Irrespective of the axis or the speed of the head movement, we found that DVP reliably predicted our participants' experiences of cybersickness. Significant positive linear relationships were found between the severity of their sickness and the mean, peak and standard deviation of this DVP data. Thus, our DVP hypothesis appears to offer significant advantages over existing (general) theories of motion sickness in terms of understanding user experiences in HMD VR. Instead of merely speculating about the presence, or degree, of sensory conflict in a particular simulation, DVP can be used to estimate the conflict produced by the active HMD VR. Importantly, this DVP is an objective measure of the stimulation (not an internal model of the user's sensory processing). Compared to its many competitors, DVP also appears to provide a simpler operational definition of the provocative stimulation for cybersickness (since it is focussed only on movements of the head; not the body or limbs).}, author = {Palmisano, S.A. and Allison, R. S. and Kim, J.}, booktitle = {Journal of Vision (Vision Sciences Society Abstracts)}, date-added = {2021-09-06 09:10:13 -0400}, date-modified = {2021-09-06 09:11:08 -0400}, doi = {10.1167/jov.21.9.1966}, keywords = {Augmented & Virtual Reality}, pages = {1966}, title = {Differences in virtual and physical head orientation predict sickness during head-mounted display based virtual reality}, volume = {21}, year = {2021}, url-1 = {https://doi.org/10.1167/jov.21.9.1966}}
@incollection{Lee:2021aa, abstract = {Occlusion of one object by another is one of the strongest and best-known pictorial cues to depth. However, it has been suggested that, in addition to a cumulative sense of depth, successive occlusions of previous objects by newly presented objects can give rise to illusory motion in depth (Engel, Remus \& Sainath, 2006). Engel and colleagues (2006) found that a stacking disk stimulus, where each disk occludes a previous disk in a pile, generates a strong sensation of the stack moving towards the observer. While the perceived motion associated with this illusion has been studied, the resultant depth percept has not. To investigate if the successive introduction of occluding objects affected the perceived depth of a stacked disk stimulus, we compared two conditions. In one, participants were presented with two static piles of disks, while in the other, participants viewed one static and one stacking pile of disks. In both conditions, we presented 20 disks in one pile and a range of disks in the other using a method of constant stimuli. Participants indicated which pile appeared taller. The proportions of `taller' responses were fit with cumulative normal psychometric functions from which we calculated points of subjective equality for the number of disks in each pile. We found static piles with the same number of disks appeared approximately equal in height. In contrast, the successive presentation of disks in the stacking condition appeared to enhance the perceived height of the stack - fewer disks were needed to match the static pile. Surprisingly, we also found just-noticeable differences varied between conditions: the task was easier when participants compared stacking vs. static piles of disks. Our results suggest that successive occlusions generate a greater sense of height than occlusion alone, and that dynamic occlusion may be an underappreciated source of depth information.}, author = {Abigail R. I. Lee and Robert S. Allison and Laurie M. Wilcox}, booktitle = {Journal of Vision (Vision Sciences Society Abstracts)}, date-added = {2021-09-06 08:58:21 -0400}, date-modified = {2021-09-06 08:59:56 -0400}, doi = {10.1167/jov.21.9.1963}, keywords = {Stereopsis}, pages = {1963}, title = {Depth perception from successive occlusion}, volume = {21}, year = {2021}, url-1 = {https://doi.org/10.1167/jov.21.9.1963}}
@inproceedings{Palmisano:2021bs, author = {Palmisano, S.A. and Allison, R. S. and Kim, J.}, booktitle = {IEEE VR 2021 Workshop on Immersive Sickness Prevention (WISP)}, date-added = {2023-03-21 17:31:46 -0400}, date-modified = {2023-03-21 17:31:58 -0400}, keywords = {Augmented & Virtual Reality}, title = {Why do we get sick during HMD-based Virtual Reality?}, year = {2021}}
@inproceedings{Allison:2021ss, abstract = {VESA DSC and VDC-M (https://vesa.org/vesa-display-compression-codecs/) are in widespread usage in millions of display systems. This rollout was preceded by extensive and targeted subjective quality assessment to validate predictions of codec quality and visually lossless behaviour. In this talk, we will overview the assessment activities to date and their extension to applications in immersive displays. Our focus will be on subjective testing at York University using the ISO 29170-2 Appendix A protocol (1). In the ISO 29170-2 `flicker paradigm', the test and reference are presented side-by-side on the display (Figure 1). The test consists of the compressed image temporally interleaved (alternating) with the uncompressed version at a fixed frequency (typically 5 Hz). In the reference sequence, the uncompressed image alternates with itself. Participants view the test and reference sequences side by side and are asked to identify the compressed image (i.e., which image sequence contained flicker). We have also developed and implemented modified versions of the protocol to evaluate moving and stereoscopic displays. This testing has proceeded in discrete stages including: * Validation of visually lossless performance in a wide range of representative image samples * Confirmation of visually lossless performance in chroma subsampled images and moving content * Assessment of compression performance with high-dynamic range content * Assessment of compression performance with stereoscopic 3D content * Assessment of the effects of chromatic aberration correction on codec performance Testing has focused on challenging test cases to optimize the effort and benefit of time-consuming subjective assessment studies. Generally, both DSC and VDC-M have met expectations for visually lossless performance over a wide variety of content and use cases. Flicker testing is a highly conservative test procedure and codec performance in real world scenarios is expected to exceed that found under the harsher conditions of flicker testing. }, address = {Seoul, Korea}, annote = { IMID 2021, which will be held at COEX in Seoul, Korea from August 25 to 27, 2021, }, author = {Robert S. Allison and Laurie M. Wilcox}, booktitle = {21st International Meeting on Information Display, IMID 2021 Digest}, date-added = {2021-09-06 09:27:35 -0400}, date-modified = {2021-09-16 08:50:08 -0400}, keywords = {Stereopsis}, month = {08}, pages = {29}, title = {Subjective Quality Assessment of VESA Display Stream Compression Codecs}, url = {https://upload.congkong.net/imid2021/imid2021-e-proceedings.pdf}, year = {2021}, url-1 = {https://upload.congkong.net/imid2021/imid2021-e-proceedings.pdf}}
@inproceedings{Au:2021mb, address = {Seoul, Korea}, annote = { IMID 2021, which will be held at COEX in Seoul, Korea from August 25 to 27, 2021, }, author = {Domenic Au and Sanjida Sharmin Mohona and Yuqian Hou and Onoise Gerald Kio and James Goel and Natan Jacobson and Robert S. Allison and Laurie M. Wilcox}, booktitle = {21st International Meeting on Information Display, IMID 2021 Digest}, date-added = {2021-09-06 09:27:35 -0400}, date-modified = {2021-09-06 09:27:35 -0400}, keywords = {Stereopsis}, month = {08}, pages = {32}, title = {Sensitivity of VESA Display Stream Compression Codecs to Chromatic Aberration}, year = {2021}}
@inproceedings{Hosale:2021ss, address = {Chengdu, China}, annote = {Multimedia '21: ACM Multimedia 2021, October 20--24, 2021,}, author = {Hosale, M. D. and Allison, R. S. and Madsen, J. and Gordon, M.}, booktitle = {Proceedings of the 29th ACM International Conference on Multimedia}, date-added = {2021-07-14 22:07:57 -0400}, date-modified = {2022-08-17 16:26:20 -0400}, doi = {10.1145/3474085.3475524}, keywords = {Augmented & Virtual Reality}, pages = {3720--3727}, title = {ArtScience and the ICECUBE LED Display}, year = {2021}, url-1 = {https://doi.org/10.1145/3474085.3475524}}
@techreport{Goel:2021aa, abstract = {Use of image compression is essential to address the proliferation of high-performance displays in next-generation vehicles. This paper details the trends impacting automotive display design and describes a new MIPI Display Working Group (DWG) study that verifies how the use of Video Electronics Standards Association Display Compression-M (VDC-M) within the MIPI Display Serial Interface 2 (DSI-2) protocol can provide visually lossless compression for automotive displays.}, author = {James Goel and Dale Stolitzka and Ian Smith and Natan Jacobson and Alain Legault and Kendra Wiley and Rick Wietfeld and Chris Wiesner and Kevin Yee and Robert Allison and Craig Wiley and Laurie Wilcox and Sharmion Kerley and Domenic Au and Melanie Cole}, date-added = {2021-04-03 08:10:40 -0400}, date-modified = {2021-04-03 08:14:36 -0400}, institution = {MIPI Alliance}, keywords = {Image Quality}, title = {Validating the Use of Compression for Automotive Displays}, year = {2021}}
@article{Bury:aa, author = {Bury, N. and Jenkin, M. and Allison, R. S. and Harris, L. R.}, date-added = {2020-10-09 08:40:15 -0400}, date-modified = {2020-10-23 15:59:39 -0400}, doi = {10.1371/journal.pone.0241087}, journal = {PLOS ONE}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {10}, pages = {e0241087}, title = {Perceiving self-motion in a field of jittering lollipops from ages 4 to 95}, volume = {15}, year = {2020}, url-1 = {https://doi.org/10.1371/journal.pone.0241087}}
@article{Palmisano:ab, abstract = {Sensory conflict, eye-movement, and postural instability theories each have difficulty accounting for the motion sickness experienced during head-mounted display based virtual reality (HMD VR). In this paper we review the limitations of existing theories in explaining cybersickness and propose a practical alternative approach. We start by providing a clear operational definition of provocative motion stimulation during active HMD VR. In this situation, whenever the user makes a head movement, his/her virtual head will tend to trail its true position and orientation due to the display lag (or motion to photon latency). Importantly, these differences in virtual and physical head pose (DVP) will vary over time. Based on our own research findings, we propose that cybersickness in HMD VR is triggered by large magnitude, time-varying patterns of DVP. We then show how this hypothesis can be tested by: (1) systematically manipulating display lag magnitudes and head movement speeds across HMD VR conditions; and (2) comparing the user's estimates of DVP and cybersickness produced in each of these conditions. We believe that this approach will allow researchers to precisely predict which situations will (and will not) be provocative for cybersickness in HMD VR.}, annote = {Citation: Palmisano S, Allison RS and Kim J (2020) Cybersickness in Head-Mounted Displays Is Caused by Differences in the User's Virtual and Physical Head Pose. Front. Virtual Real. 1:587698. doi: 10.3389/frvir.2020.587698}, author = {Palmisano, S. and Allison, R. S. and Kim, J.}, date-added = {2020-10-02 08:41:49 -0400}, date-modified = {2020-10-23 19:07:21 -0400}, doi = {10.3389/frvir.2020.587698}, journal = {Frontiers in Virtual Reality}, keywords = {Augmented & Virtual Reality}, pages = {Article 587698}, title = {Cybersickness in Head-Mounted Displays is Caused by Differences in the User's Virtual and Physical Head Pose}, volume = {1}, year = {2020}, url-1 = {https://doi.org/10.3389/frvir.2020.587698}}
@article{Zhao:2019fj, author = {Zhao, J. and Allison, R. S.}, date-added = {2019-11-25 17:52:08 -0500}, date-modified = {2020-09-29 11:22:35 -0400}, doi = {10.1007/s10055-019-00416-7}, journal = {Virtual Reality}, keywords = {Augmented & Virtual Reality}, pages = {515-524}, title = {Comparing Head Gesture, Hand Gesture and Gamepad Interfaces for Answering Yes/No Questions in Virtual Environments}, url = {http://link.springer.com/article/10.1007/s10055-019-00416-7}, volume = {24}, year = {2020}, url-1 = {https://doi.org/10.1007/s10055-019-00416-7}}
@article{Palmisano:aa, author = {Palmisano, S.A. and Nakamura, S. and Allison, R. S. and Riecke, B.}, date-added = {2019-09-05 09:21:06 -0400}, date-modified = {2020-06-17 08:24:37 -0400}, doi = {10.3758/s13414-019-01886-2}, journal = {Attention, Perception and Psychophysics}, keywords = {Stereopsis, Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {2098--2118}, title = {The Stereoscopic Advantage for Vection Persists Despite Reversed Disparity}, url = {https://rdcu.be/b4YOT}, volume = {82}, year = {2020}, url-1 = {https://doi.org/10.3758/s13414-019-01886-2}}
@article{Hartle:pb, abstract = {Objective: We examined the contribution of binocular vision and experience to performance on a simulated helicopter flight task. Background: Although there is a long history of research on the role of binocular vision and stereopsis in aviation, there is no consensus on its operational relevance. This work addresses this question using a naturalistic task in a virtual environment. Method: Four high-resolution stereoscopic terrain types were viewed monocularly and binocularly. In separate experiments, we evaluated performance of undergraduate students and military aircrew on a simulated low hover altitude judgment task. Observers were asked to judge the distance between a virtual helicopter skid and the ground plane. Results: Our results show that for both groups, altitude judgments are more accurate in the binocular viewing condition than in the monocular condition. However, in the monocular condition, aircrew were more accurate than undergraduate observers in estimating height of the skid above the ground. Conclusion: At simulated altitudes of 5 ft (1.5 m) or less, binocular vision provides a significant advantage for estimation of the depth separation between the landing skid and the ground, regardless of relevant operational experience. However, when binocular cues are unavailable aircrew outperform undergraduate observers, a result that likely reflects the impact of training on the ability to interpret monocular depth cues.}, author = {Hartle, B. and Sudhama, Aishwarya and Deas, Lesley M. and Allison, Robert S. and Irving, Elizabeth L. and Glaholt, Mackenzie and Wilcox, Laurie M.}, date-added = {2019-06-08 18:33:06 -0400}, date-modified = {2020-09-27 17:15:09 -0400}, doi = {10.1177/0018720819853479}, journal = {Human Factors: The Journal of the Human Factors and Ergonomics Society}, keywords = {Stereopsis}, number = {5}, pages = {812-824}, title = {Contributions of stereopsis and aviation experience to simulated rotary wing altitude estimation}, volume = {62}, year = {2020}, url-1 = {https://doi.org/10.1177/0018720819853479}}
@incollection{Tong:2020aa, author = {Tong, J. and Allison, R. S. and Wilcox, L. M.}, booktitle = {ModVis 2020}, date-added = {2023-03-21 17:30:42 -0400}, date-modified = {2023-03-21 17:31:11 -0400}, keywords = {Stereopsis}, title = {Modeling biases of perceived slant in curved surfaces}, year = {2020}}
@incollection{Jorges:2021aa, address = {Hong Kong}, author = {Bj{\"o}rn J{\"o}rges and Nils Bury and Meaghan McManus and Robert Allison and Michael Jenkin and Laurence Harris}, booktitle = {7th International Symposium on Visually-Induced Motion Sensations, VIMS 2020}, date-added = {2021-09-06 10:22:41 -0400}, date-modified = {2021-09-06 10:22:41 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {12}, title = {Sex/gender differences in the perception of distance and self-motion}, url = {https://ieda.ust.hk/dfaculty/so/VIMS2020/}, year = {2020}, url-1 = {https://ieda.ust.hk/dfaculty/so/VIMS2020/}}
@incollection{Jorges:aa, abstract = {The perception of self-motion is a multisensory process involving visual and vestibular cues, among others. Visual cues may become more important in visual-vestibular tasks when vestibular cues are attenuated, for example in determining the perceptual upright while lying supine [1]. We tested whether this effect might generalize to self-motion perception, where a higher effectiveness of visual cues should lead to an overestimation of traveled distance. We immersed participants in a virtual hallway and showed them targets at different distances ahead of them. The targets disappeared and participants experienced optic flow simulating straight-ahead self-motion. They indicated by button press when they felt they had reached the position of the target previously viewed. Participants also performed a control task to assess biases in depth perception. We showed them virtual boxes at different distances and they judged on each trial if the height of the box was bigger or smaller than a ruler in their hands. Perceived distance can be deduced from biases in perceived size. They performed both tasks sitting upright and lying supine. For the main task, we found that participants needed less optic flow to perceive they had reached the target's position when supine than when sitting (by 4.4\%, 95\% CI=[2.9\%;6.3\%], using Mixed Modelling). For the control task, participants underestimated the distance slightly less when supine (by 2.5\%, 95\% CI = [0.05\%;5.00\%], as above). When supine, participants needed to travel less far compared to sitting, even though they overestimated distance while supine versus sitting. The bias in traveled distance can thus not be reduced to a bias in perceived distance. Our experiment provides evidence that visual information is more important for the perception of self-motion when gravity is not aligned with the long body axis. We acknowledge the generous support of the Canadian Space Agency (15ILSRA1-York). [1] Dyde et al. (2006) Exp Brain Res 173:612--22}, annote = {Oct 5-7, 2020 virtual meeting}, author = {Bj{\"o}rn J{\"o}rges and Nils Bury and Meaghan McManus and Robert S. Allison and Michael Jenkin and Laurence R. Harris}, booktitle = {3rd Interdisciplinary Navigation (iNAV2020) Symposium Proceedings}, date-added = {2020-10-27 13:50:05 -0400}, date-modified = {2020-10-27 13:50:05 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {64}, title = {The perception of visually simulated self-motion is altered by body posture}, url = {https://inavsymposium.com/wp-content/uploads/2020/10/Data_Blitz_Booklet_2020.pdf}, year = {2020}, url-1 = {https://inavsymposium.com/wp-content/uploads/2020/10/Data_Blitz_Booklet_2020.pdf}}
@incollection{Kuo:aa, abstract = {Walking interfaces for Virtual Reality often produce proprioceptive, vestibular and somatosensory signals which conflict with the visual presentation of terrain conditions in virtual environments. We compared locomotion decisions made using a dual joystick gamepad with a walking-in-place metaphor. Each trial presented two choices where the visual path condition differed in one of the following aspects: (a) incline, (b) friction, (c) texture, and (d) width. Users chose one of these paths by using the locomotion interface to walk to a goal. Their decisions were recorded and analyzed as a generalized linear mixed model. The results suggest that the walking-in-place interface produces choices of visual conditions that more often reflect expectations of walking in the real world: decisions that minimize energy expended or risk of injury. Because of this, we can infer that different walking interfaces can produce different results in virtual reality experiments. Therefore, behavioral scientists should be wary that sensory discrepancies between visual presentation and other modalities can negatively affect the ecological validity of studies using virtual reality. Consideration should be taken designing these studies to ensure that sensory inputs are as natural and consistent between modalities as possible. }, author = {Kuo, C. and Allison, R. S.}, booktitle = {Vision Sciences Society Annual Conference (Accepted but not presented because of COVID pandemic cancellation)}, date-added = {2020-10-27 13:38:54 -0400}, date-modified = {2020-10-27 13:45:03 -0400}, keywords = {Augmented & Virtual Reality}, title = {Locomotor decision-making altered by different walking interfaces in virtual reality}, year = {2020}}
@incollection{Cutone:aa, abstract = {For self-generated motion parallax, a sense of head velocity is needed to estimate distance from object motion. This information can be obtained from proprioceptive and visual sources. If visual and kinesthetic information are incongruent, the visual motion of objects will not match the sensed physical velocity of the head, resulting in a distortion of perceived distances. We assessed this prediction by varying the gain between physical observer head motion and the simulated motion. Given that the relative and absolute motion parallax would be greater than expected from head motion when gain was greater than 1.0, we anticipated that this manipulation would result in objects appearing closer to the observer. Using an HMD, we presented targets 1 to 3 meters away from the observer within a cue rich environment with textured walls and floors. Participants stood and swayed laterally at a rate of 0.5 Hz paced using a metronome. Lateral gain was applied by amplifying their real position by factors of 1.0 to 3.0, then using that to set the instantaneous viewpoint within the virtual environment. After presentation, the target disappeared and the participant performed a blind walk and reached for it. Their hand position was recorded and we computed positional errors relative to the target. We found no effect of motion parallax gain manipulation on binocular reaching accuracy. In a second study we evaluated the role of stereopsis in counteracting the anticipated distortion in perceived space by testing observers monocularly. In this case, distances were perceived as nearer as gain increased, but the effects were relatively small. Taken together our results suggest that observers are flexible in their interpretation of observer produced motion parallax during active head movement. This provides considerable tolerance of spatial perception to mismatches between physical and virtual motion in rich virtual environments.}, author = {Cutone, M. and Wilcox, L. M. and Allison, R. S.}, booktitle = {Journal of Vision (Vision Sciences Society Abstracts)}, date-added = {2020-10-27 13:09:43 -0400}, date-modified = {2020-10-27 13:09:43 -0400}, doi = {10.1167/jov.20.11.1426}, keywords = {Augmented & Virtual Reality}, number = {11}, pages = {1426}, title = {The impact of motion gain on egocentric distance judgments from motion parallax}, volume = {20}, year = {2020}, url-1 = {https://doi.org/10.1167/jov.20.11.1426}}
@incollection{Tong:aa, abstract = {Veridical perception of surface slant is important to everyday tasks such as traversing terrain and interacting with or placing objects on surfaces. However, natural surfaces contain higher-order depth variation, or curvature, which may impact how slant is perceived. We propose a computational model which predicts that curvature, real or distortion-induced, biases the perception of surface slant. The model is based on the perspective projection of surfaces to form ``retinal images'' containing monocular and binocular texture cues (gradients) for slant estimation. Curvature was either intrinsic to the modelled surface or induced by non-uniform magnification i.e. radial distortion (typical in wide-angle lenses and head-mounted display optics). The resulting binocular and monocular texture gradients derived from these conditions make specific predictions regarding perceived surface slant. In a series of psychophysical experiments we tested these predictions using slant discrimination and magnitude estimation tasks. Our results confirm that local slant estimation is biased in a manner consistent with apparent surface curvature. Further we show that for concave surfaces, irrespective of whether curvature is intrinsic or distortion-induced, there is a net underestimation of global surface slant. Somewhat surprisingly, we also find that the observed biases in global slant are driven largely by the texture gradients and not by the concurrent changes in binocular disparity. This is due to vertical asymmetry in texture gradients of curved surfaces with overall slant. Our results show that while there is a potentially complex interaction between surface curvature and slant perception, much of the perceptual data can be predicted by a relatively simple model based on perspective projection. The work highlights the importance of evaluating the impact of higher-order variations on perceived surface attitude, particularly in virtual environments in which curvature may be intrinsic or caused by optical distortion.}, author = {Tong, J. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Journal of Vision (Vision Sciences Society Abstracts)}, date-added = {2020-10-27 13:09:43 -0400}, date-modified = {2020-10-27 13:09:43 -0400}, doi = {10.1167/jov.20.11.561}, keywords = {Stereopsis}, number = {11}, pages = {561}, title = {Modeling biases of perceived slant in curved surfaces}, volume = {20}, year = {2020}, url-1 = {https://doi.org/10.1167/jov.20.11.561}}
@incollection{Palmisano:2020df, abstract = {Research has shown that visual illusions of self-motion (vection) can be improved by adding consistent stereoscopic information to inducing displays. However here we examined the effect of placing this stereoscopic information into direct conflict with monocular motion signals (by swapping left and right eye views to reverse disparity). We compared the vection in depth induced by stereo-consistent, stereo-reversed and flat-stereo displays. We also manipulated the amount of monocular self-motion information in these inducing displays (by providing explicit changing-size cues in half of the trials). As expected, we found that stereo-consistent conditions improved the vection induced by both changing-size and same-size patterns of optic flow (relative to their equivalent flat-stereo conditions). However, stereo-reversed conditions were also found to improve the vection induced by same-size patterns of optic flow. Additional evidence from our experiments suggested that all of these stereoscopic advantages for vection were due to the effects on perceived motion-in-depth (not perceived scene depth). These findings demonstrate that stereoscopic information does not need to be consistent with monocular motion signals in order to improve vection in depth. Rather they suggest that stereoscopic information only needs to be dynamic (as opposed to static) in order to enhance the experiences of vection induced by optic flow.}, annote = {VSS 2020}, author = {Stephen Palmisano and Shinji Nakamura and Robert Allison and Bernhard Riecke}, booktitle = {Journal of Vision (Vision Sciences Society Abstracts)}, date-added = {2020-10-27 13:09:43 -0400}, date-modified = {2020-10-27 13:09:59 -0400}, doi = {10.1167/jov.20.11.339}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {11}, pages = {339}, title = {Pseudoscopic vection: Reversing stereo continues to improve self-motion perception despite increased conflict.}, volume = {20}, year = {2020}, url-1 = {https://doi.org/10.1167/jov.20.11.339}}
@incollection{Keyvanara:2020di, abstract = {Saccadic suppression of image displacement (SSD) is a perceptual feature of our visual system that occurs when we move our gaze from one fixation to another. SSD has mostly been studied with the head fixed. Normally when we move about we move our head as well as our eyes, although in virtual reality the virtual head movements may not correspond to the physical head movements, producing a conflict between vision and the vestibular sense. Here we investigated the SSD effect during simulated head movements. Participants' eyes were tracked as they viewed a set of 3D scenes with a constant (rightward) camera pan. They produced a horizontal (rightward) saccade upon displacement of an object in the scene, during which a sudden shift of the scene occurred in one of 10 different directions. Using a Bayesian adaptive procedure, we estimated thresholds for detection of these sudden camera movements. Within-subjects analysis showed that when users made horizontal saccades, the horizontal image translations were significantly less detectable than vertical image translations and also less noticeable than in-depth translations. Likewise, horizontal transsaccadic rotations were significantly less detectable than vertical image rotations. These results imply that in a 3D virtual environment, when users pan their head while making a horizontal saccade, they would be less susceptible to noticing horizontal changes to their viewpoint that occur during a saccade compared to vertical or in-depth changes. We are currently extending these studies to measure SSD during actual head motions in immersive VR, allowing us to assess the contributions of the visual, vestibular and proprioceptive senses. The interaction between head motion, eye movement and suppression of graphical updates during saccades can provide insight into designing better VR experiences.}, author = {Keyvanara, M. and Allison, R. S.}, booktitle = {Vestibular Oriented Research Meeting, Journal of Vestibular Research}, date-added = {2020-07-07 13:46:56 -0400}, date-modified = {2020-07-07 13:48:02 -0400}, doi = {10.3233/VES-200699}, keywords = {Eye Movements & Tracking}, pages = {142}, title = {Effects of simulated head motion and saccade direction on sensitivity to transsaccadic image motion}, volume = {30}, year = {2020}, url-1 = {https://doi.org/10.3233/VES-200699}}
@incollection{Cutone:wb, abstract = {For self-generated motion parallax, a sense of head velocity is needed to estimate distance from object motion (1). This information can be obtained from vestibular, proprioceptive, and visual sources. If the magnitude of efferent signals from the vestibular system produced by head motion does not correlate with the velocity gradient of the visible optic flow pattern, a conflict arises which leads to breakdown of motion-distance invariance. This potentially results in distortions of perceived distances to objects as visual and vestibular signals are non-concordant. We assessed this prediction by varying the gain between the observer's physical head motion and simulated motion. Given that the relative and absolute motion parallax would be greater than expected from head motion when gain was greater than 1.0, we anticipated that this manipulation would result in objects appearing closer to the observer. Using an HMD, we presented targets 1 to 3 meters away from the observer within a cue rich environment with textured walls and floors. Participants stood and swayed laterally at a rate of 0.5 Hz. Lateral gain was applied by amplifying their real position by factors of 1.0 to 3.0, then using that to set the instantaneous viewpoint within the virtual environment. After presentation, the target disappeared, and the participant performed a blind walk and reached for it. Their hand position was recorded, and we computed positional errors relative to the target. We found no effect of our motion parallax gain manipulation on binocular reaching accuracy. To evaluate the role of stereopsis in counteracting the anticipated distortion in perceived space, we tested observers on the same task monocularly. In this case, distances were perceived as nearer as gain increased, but the effects were relatively small. Taken together our results suggest that observers are flexible in their interpretation of observer produced motion parallax during active head movement. This provides considerable tolerance of spatial perception to mismatches between physical and virtual motion in rich virtual environments.}, author = {Cutone, M. and Wilcox, L. M. and Allison, R. S.}, booktitle = {Vestibular Oriented Research Meeting, Journal of Vestibular Research}, date-added = {2020-05-21 13:02:13 -0400}, date-modified = {2020-07-07 13:48:40 -0400}, doi = {10.3233/VES-200699}, keywords = {Augmented & Virtual Reality}, pages = {139}, title = {Distance perception when real and virtual head motion do not match}, volume = {30}, year = {2020}, url-1 = {https://doi.org/10.3233/VES-200699}}
@inproceedings{Flagler:aa, annote = {2020 IEEE International Conference on Systems, Man, and Cybernetics (SMC), October 11-14, 2020, Toronto, Canada}, author = {Flagler, T. and Tong, J. and Allison, R. S. and Wilcox, L. M.}, booktitle = {2020 IEEE International Conference on Systems, Man, and Cybernetics (SMC), October 11-14, 2020, Toronto, Canada}, date-added = {2020-09-27 15:00:22 -0400}, date-modified = {2020-12-02 13:12:47 -0500}, keywords = {Augmented & Virtual Reality}, pages = {3964-3968}, title = {Validity Testing the NeuLog Galvanic Skin Response Device}, url = {papers/flaglerSMC.pdf}, year = {2020}, url-1 = {papers/flaglerSMC.pdf}}
@inproceedings{Kuo:ab, abstract = {Virtual environments can replicate the visual appearance of terrain conditions, but the movements involved in using the interfaces confer their own bodily sensations, which can be incongruent with the visual presentation. If naturalness of interaction is a major factor contributing to the feeling of presence, it follows that a more natural locomotion interface should facilitate better presence, indicated by more natural locomotor behaviors. Here we propose a framework for studying the interaction of different locomotion interfaces with visual information on navigation decisions in virtual environments. We validated this framework by performing a user study that compared decisions made using a dual joystick gamepad with a walking-in-place metaphor. The paths presented on a given trial differed visually in one of the following aspects: (a) incline, (b) friction, (c) texture, and (d) width. In this experiment, choices made with the walking-in-place interface more closely matched visual conditions which would minimize energy expenditure or physical risk in the natural world. We provide some observations that would further improve this method in future implementations. This approach provides a way of both studying factors in perceptual decision making and demonstrates the effect of interface on presence as reflected by natural behavior.}, annote = {2020 IEEE International Conference on Systems, Man, and Cybernetics (SMC), October 11-14, 2020, Toronto, Canada}, author = {Kuo, C. and Allison, R. S.}, booktitle = {2020 IEEE International Conference on Systems, Man, and Cybernetics (SMC), October 11-14, 2020, Toronto, Canada}, date-added = {2020-09-27 15:00:22 -0400}, date-modified = {2020-12-02 13:12:58 -0500}, keywords = {Augmented & Virtual Reality}, pages = {3283-3290}, title = {Motion matters: Comparing presence induced by two locomotion interfaces using decision-making tasks in virtual reality}, url = {papers/kuoSMC.pdf}, year = {2020}, url-1 = {papers/kuoSMC.pdf}}
@inproceedings{Tong:ft, author = {Tong, J. and Allison, R. S. and Wilcox, L. M.}, booktitle = {IEEE International Symposium on Mixed and Augmented Reality (ISMAR)}, date-added = {2020-08-06 11:23:23 -0400}, date-modified = {2023-10-27 11:11:12 -0400}, doi = {10.1109/ISMAR50242.2020.00027}, keywords = {Augmented & Virtual Reality}, pages = {73-79}, title = {Optical distortions in {VR} bias the perceived slant of moving surfaces}, url = {https://percept.eecs.yorku.ca/papers/ISMAR_2020_VGTC_format.pdf}, year = {2020}, url-1 = {https://doi.org/10.1109/ISMAR50242.2020.00027}}
@inproceedings{Keyvanara:pz, annote = {Stuttgart, Germany, June 2020 (not held due to COVID)}, author = {Keyvanara, Maryam and Allison, R. S.}, booktitle = {ACM Symposium on Eye Tracking Research and Applications}, date-added = {2020-04-30 11:01:34 -0400}, date-modified = {2020-09-29 09:53:32 -0400}, doi = {10.1145/3379155.3391318}, keywords = {Eye Movements & Tracking}, pages = {Article No. 14, 1--8}, title = {Effect of a Constant Camera Rotation on the Visibility of Transsaccadic Camera Shifts}, year = {2020}, url-1 = {https://doi.org/10.1145/3379155.3391318}}
@inproceedings{Mohona:2020tz, abstract = {In stereoscopic displays different images are presented separately to the left and right eyes. This requirement may increase the bandwidth demand as well as increase the occurrence of visible compression-related artefacts. Here we report the results of a large-scale subjective assessment of high dynamic range (HDR) stereoscopic image compression. The ISO/IEC 29170-2 flicker paradigm was adapted for stereoscopic images and used to evaluate two VESA (Video Electronics Standards Association) image compression codecs: DSC 1.2a and VDCM 1.2.2. We compared the performance on stereoscopic images versus 2D images for both codecs.}, annote = {Athlone, Ireland, May 26-28 2020}, author = {Mohona, S.S. and Au, D. and Kio, O. G. and Robinson, R. and Hou, Y. and Wilcox, L. M. and Allison, R. S.}, booktitle = {QoMEX International Conference on Quality of Multimedia Experience}, date-added = {2020-04-30 10:59:28 -0400}, date-modified = {2020-09-29 09:55:01 -0400}, doi = {10.1109/QoMEX48832.2020.9123129}, keywords = {Stereopsis}, pages = {1-6}, title = {Subjective Assessment of Stereoscopic Image Quality: The Impact of Visually Lossless Compression}, year = {2020}, url-1 = {https://doi.org/10.1109/QoMEX48832.2020.9123129}}
@misc{Hosale:2020qe, annote = {Hosale, Mark-David, Jim Madsen, and Robert Allison. ICECUBE LED Display [ILDm\^3], Interactive Installation in Disruptive Design and Digital Fabrication, Gayles Gallery, York University. February 3rd -- 13th 2020.}, author = {Mark-David Hosale and Jim Madsen and Robert Allison}, date-added = {2020-10-27 13:54:50 -0400}, date-modified = {2020-10-27 13:55:07 -0400}, howpublished = {Interactive Installation in Disruptive Design and Digital Fabrication, Gayles Gallery, York University}, keywords = {Misc.}, month = {02}, title = {ICECUBE LED Display [ILDm\^3]}, year = {2020}}
@article{Tong:ab, abstract = {Modern virtual reality (VR) headsets use lenses that distort the visual field, typically with distortion increasing with eccentricity. While content is pre-warped to counter this radial distortion, residual image distortions remain. Here we examine the extent to which such residual distortion impacts the perception of surface slant. In Experiment 1, we presented slanted surfaces in a head-mounted display and observers estimated the local surface slant at different locations. In Experiments 2 (slant estimation) and 3 (slant discrimination), we presented stimuli on a mirror stereoscope, which allowed us to more precisely control viewing and distortion parameters. Taken together, our results show that radial distortion has significant impact on perceived surface attitude, even following correction. Of the distortion levels we tested, 5\% distortion results in significantly underestimated and less precise slant estimates relative to distortion-free surfaces. In contrast, Experiment 3 reveals that a level of 1\% distortion is insufficient to produce significant changes in slant perception. Our results highlight the importance of adequately modeling and correcting lens distortion to improve VR user experience.}, author = {Tong, J. and Allison, R. S. and Wilcox, L. M.}, date-added = {2019-11-08 16:00:34 -0500}, date-modified = {2020-04-30 15:31:26 -0400}, doi = {10.2352/J.ImagingSci.Technol.2019.63.6.060409}, journal = {Journal of Imaging Science and Technology}, keywords = {Stereopsis}, number = {6}, pages = {60409.1 - 60409.11}, title = {Radial distortions in {VR} displays impact the perception of surface slant}, volume = {63}, year = {2019}, url-1 = {https://doi.org/10.2352/J.ImagingSci.Technol.2019.63.6.060409}}
@article{Cutone:fv, author = {Cutone, M. and Allison, R. S. and Wilcox, L. M.}, date-added = {2019-06-08 18:33:06 -0400}, date-modified = {2019-07-16 07:39:31 -0400}, doi = {10.1016/j.visres.2019.06.003}, journal = {Vision Research}, keywords = {Stereopsis}, pages = {43-51}, title = {The impact of retinal motion on stereoacuity for physical targets}, volume = {161}, year = {2019}, url-1 = {https://doi.org/10.1016/j.visres.2019.06.003}}
@article{Fujii:2019kn, abstract = {The quality of stereoscopic 3D cinematic content is a major determinant for user experience in immersive cinema in both traditional theatres and cinematic virtual reality. One of the most important parameters is the frame rate of the content, which has historically been 24 frames per second for movies, but higher frame rates are being considered for cinema and are standard for virtual reality. A typical behavioural response to immersive stereoscopic 3D content is vection, the visually-induced perception of self-motion elicited by moving scenes. In this work we investigated how participants' vection varied with simulated virtual camera speed, frame rate, and motion blur produced by the virtual camera's exposure, while viewing depictions of movement through a realistic virtual environment. We also investigated how their postural sway varied with these parameters and how sway covaried with levels of perceived self-motion. Results show that while average perceived vection significantly increased with 3D content frame rate and motion speed, motion blur had no significant effect on perceived vection. We also found that levels of postural sway induced by vection correlated positively with subjective ratings.}, author = {Fujii, Y. and Kio, O. G. and Au, D. and Wilcox, L. M. and Allison, R. S.}, date-added = {2019-04-11 16:02:11 -0400}, date-modified = {2019-06-08 18:29:35 -0400}, doi = {10.1016/j.displa.2019.03.002}, journal = {Displays}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {33-43}, title = {Effects of Frame Rate on Vection and Postural Sway}, url = {http://percept.eecs.yorku.ca/papers/Effects of Frame Rate on Vection.pdf}, volume = {58}, year = {2019}, url-1 = {https://doi.org/10.1016/j.displa.2019.03.002}}
@article{Guterman:aa, abstract = {Changing head orientation with respect to gravity changes the dynamic sensitivity of the otoliths to linear accelerations (gravitational and inertial). We explored whether varying head orientation and optic flow direction relative to gravity affects the perception of visually induced self-motion (vection) in two experiments. We confirmed that vertical optic flow produces stronger vection than horizontal optic flow in upright observers. We hypothesized that if this was due to aligning the simulated self-motion with gravity, then interaural (as opposed to spinal) axis motion while lying on the side would provide a similar vection advantage. Alternatively, motion along the spinal axis could enhance vection regardless of head orientation relative to gravity. Finally, we hypothesized that observer expectation and experience with upright locomotion would favour horizontal vection, especially when in upright posture. In the first experiment, observers stood and lay supine, prone, left and right side down, while viewing a translating random dot pattern that simulated observer motion along the spinal or interaural axis. Vection magnitude estimates, onset, and duration were recorded. Aligning the optic flow direction with gravity enhanced vection in side-lying observers as reflected by either a bias for interaural rather than spinal flow or by an elimination/reduction of the spinal advantage compared to upright. However, when overlapping these signals was not possible---as in the supine and prone posture---spinal axis motion enhanced vection. Furthermore, perceived scene structure varied with head orientation (e.g., dots were seen as floating bubbles in some conditions). To examine the influence of scene structure, in the second experiment we compared vection during simulated motion with respect to two environments: a rigid pipe structure that looked like a complex arrangement of plumbing pipes, and a field of dots. Interestingly, vertical optic flow with the pipes stimulus produced a similar experience to that of riding an elevator and tended to enhance vection. Overall, we found that vection depended on the direction of both the head orientation and visual motion relative to gravity, but was also influenced by the perceived scene context. These findings suggest that, in addition to head tilt relative to gravity, higher-order cognitive processes play a key part in the perception of self-motion.}, author = {Guterman, P. and Allison, R. S.}, date-added = {2019-03-28 16:58:50 -0400}, date-modified = {2019-06-08 18:30:09 -0400}, doi = {10.1016/j.displa.2019.03.004}, journal = {Displays}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {44-55}, title = {Higher-order cognitive processes moderate body tilt effects in vection}, url = {http://percept.eecs.yorku.ca/papers/Guterman posture paper preprint.pdf}, volume = {58}, year = {2019}, url-1 = {http://percept.eecs.yorku.ca/papers/Guterman%20posture%20paper%20preprint.pdf}, url-2 = {https://doi.org/10.1016/j.displa.2019.03.004}}
@article{Guterman:db, abstract = {When the head is tilted, an objectively vertical line viewed in isolation is typically perceived as tilted. We explored whether this shift also occurs when viewing global motion displays perceived as either object-motion or self-motion. Observers stood and lay left side down while viewing (1) a static line, (2) a random-dot display of 2-D (planar) motion, or (3) a random-dot display of 3-D (volumetric) global motion. On each trial, the line orientation or motion direction was tilted from the gravitational vertical, and observers indicated whether the tilt was clockwise or counter-clockwise from the perceived vertical. Psychometric functions were fit to the data and shifts in the point of subjective verticality (PSV) were measured. When the whole body was tilted, the perceived tilt of both a static line and the direction of optic flow were biased in the direction of the body tilt, demonstrating the so-called A-effect. However, we found significantly larger shifts for the static line than volumetric global motion as well as larger shifts for volumetric displays than planar displays. The A-effect was larger when the motion was experienced as self-motion compared to when it was experienced as object-motion. Discrimination thresholds were also more precise in the self-motion compared to object-motion conditions. Different magnitude A-effects for the line and motion conditions---and for object and self-motion---may be due to differences in the combination of idiotropic (body) and vestibular signals, particularly so in the case of vection which occurs despite visual-vestibular conflict.}, author = {Guterman, P. S. and Allison, R. S.}, date-added = {2019-03-28 08:55:34 -0400}, date-modified = {2019-04-13 15:01:54 -0400}, doi = {10.3390/vision3020013}, journal = {Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {2}, pages = {Article 13}, title = {The A-effect and global motion}, url = {http://percept.eecs.yorku.ca/papers/pearl Aeffect.pdf}, volume = {3}, year = {2019}, url-1 = {http://percept.eecs.yorku.ca/papers/pearl%20Aeffect.pdf}, url-2 = {https://doi.org/10.3390/vision3020013}}
@article{Smith:2019aa, abstract = {Recent studies have confirmed that monovision treatment degrades stereopsis, but it is not clear if these effects are limited to fine disparity processing, or how they are affected by viewing distance or age. Given the link between stereopsis and postural stability, it is important that we have a full understanding of the impact of monovision on binocular function. In this study we assessed the short-term effects of optically induced monovision on a depth-discrimination task for young and older (presbyopic) adults. In separate sessions, the upper limits of stereopsis were assessed with participants' best optical correction and with monovision (-1D and +1D lenses in front of the dominant and non-dominant eyes, respectively), at both near (62 cm) and far (300 cm) viewing distances. Monovision viewing resulted in significant reductions in the upper limit of stereopsis, or more generally in discrimination performance at large disparities, in both age groups at a viewing distance of 300 cm. Dynamic photorefraction performed on a sample of four young observers revealed that they tended to accommodate to minimize blur in one eye at the expense of blur in the other. Older participants would have experienced roughly equivalent blur in the two eyes. Despite this difference, both groups displayed similar detrimental effects of monovision. In addition, we find that discrimination accuracy was worse with monovision at the 3 m viewing distance, which involves fixation distances that are typical during walking. These data suggest that stability during locomotion may be compromised, a factor that is of concern for our older participants.}, author = {Smith, C. and Allison, R. S. and Wilkinson, F. and Wilcox, L. M.}, date-added = {2018-11-27 12:58:13 -0500}, date-modified = {2019-06-18 07:23:24 -0400}, doi = {10.1016/j.exer.2018.09.005}, journal = {Experimental Eye Research}, keywords = {Stereopsis}, pages = {62-67}, title = {Monovision: Consequences for depth perception from large disparities}, url = {http://percept.eecs.yorku.ca/papers/Monovision.pdf}, volume = {183}, year = {2019}, url-1 = {http://percept.eecs.yorku.ca/papers/Monovision.pdf}, url-2 = {https://doi.org/10.1016/j.exer.2018.09.005}}
@incollection{Bury:2019rw, author = {Nils-Alexander Bury and Michael Jenkin and Robert Allison and Meaghan McManus and Laurence R. Harris}, booktitle = {4th German Human Physiology Workshop}, date-added = {2020-10-27 13:43:31 -0400}, date-modified = {2020-10-27 13:43:31 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {8}, publisher = {DLR}, title = {The Effect of Long Duration Hypogravity on the Perception of Self-Motion -- VECTION}, url = {https://www.dlr.de/me/en/Portaldata/25/Resources/dokumente/veranstaltungen/veranstaltungen_2019/hpw_2019/Tagungsband_HPW2019_final.pdf}, year = {2019}, url-1 = {https://www.dlr.de/me/en/Portaldata/25/Resources/dokumente/veranstaltungen/veranstaltungen_2019/hpw_2019/Tagungsband_HPW2019_final.pdf}}
@incollection{allison2019role, abstract = {Little is known about the role of stereopsis in locomotion activities, such as continuous walking and running. While previous studies have shown that stereopsis improves the accuracy of lower limb movements while walking in constrained spaces, it is still unclear whether stereopsis aids continuous locomotion during extended motion over longer distance. We conducted two walking experiments in virtual environments to investigate the role of binocular vision in avoiding virtual obstacles and traversing virtual gaps during continuous walking. The virtual environments were presented on a novel projected display known as the Wide Immersive Stereo Environment (WISE) and the participant locomoted through them on a linear treadmill. This experimental setup provided us with a unique advantage of simulating long-distance walking through an extended environment. In Experiment 1, along each 100-m path were thirty virtual obstacles, ten each at heights of 0.1 m, 0.2 m or 0.3 m, in random order. In Experiment 2, along each 100-m path were thirty virtual gaps, either 0.2 m, 0.3 m or 0.4 m across. During experimental sessions, participants were asked to walk at a constant speed of 2 km/h under both stereoscopic viewing and non-stereoscopic viewing conditions and step over virtual obstacles or gaps when necessary. By analyzing the gait parameters, such as stride height and stride length, we found that stereoscopic vision helped people to make more accurate steps over virtual obstacles and gaps during continuous walking.}, annote = {St. Pete's Beach May 2019}, author = {Allison, Robert and Zhao, Jingbo}, booktitle = {Journal of Vision (VSS Abstract)}, date-added = {2019-09-30 12:36:03 -0400}, date-modified = {2019-10-11 17:35:06 -0400}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {10}, pages = {222b--222b}, title = {The Role of Binocular Vision in Stepping over Obstacles and Gaps in Virtual Environment}, volume = {19}, year = {2019}}
@incollection{Tong:2019ng, abstract = {In the absence of reliable and abundant depth cues, estimates of surface slant are often biased towards fronto-parallel. Here we investigate the effects of curvature distortions on perceived slant. In general, curvature distortions are predicted to decrease the precision of slant discrimination, and this uncertainty may, in turn, strengthen the fronto-parallel bias. Alternatively, curvature distortion might bias slant perception independently, or in the opposite direction, of the fronto-parallel bias. We rendered images of slanted, textured surfaces with and without radially symmetric distortions (pincushion and barrel) at low ($\approx$1\%) and high ($\approx$5\%) levels. Observers judged whether a test image (distorted or undistorted, with a variety of slants) was more slanted than a distortion-free surface with a 15$^{\circ}$ slant. We fit the psychometric data with a cumulative normal function and estimated bias and discrimination thresholds for each observer. Our results showed that 1\% distortion had no measurable impact on slant discrimination. At 5\%, both types of distortion significantly increased slant discrimination thresholds. However, only the pincushion distortion produced a systematic underestimation of perceived slant. Slant underestimation in the presence of pincushion distortion is consistent with the hypothesized effect of disparity smoothing operations. Under this hypothesis, slant should also be underestimated in the barrel distortion condition but it is not. To test the possibility that this type of curvature distortion introduces additional perceptual biases, in ongoing experiments we are measuring perceived slant magnitude in the presence and absence of curvature distortion. These suprathreshold estimates will provide a baseline for the fronto-parallel bias in isolation; additional biases in the distortion conditions could then be modelled as distortion-based effects.}, annote = {St. Petes Beach May 2019}, author = {Tong, J. and Allison, R. S. and Wilcox, L. M.}, booktitle = {VSS 2019}, date-added = {2019-08-14 08:59:34 -0400}, date-modified = {2019-10-11 17:36:10 -0400}, keywords = {Stereopsis}, number = {10}, pages = {222a--222a}, publisher = {Journal of Vision (VSS Abstracts)}, title = {Slant perception in the presence of curvature distortion}, volume = {19}, year = {2019}}
@incollection{Keyvanara:2019rf, abstract = {Gaze-contingent displays use real-time eye movement data to adjust the display content according to the user's gaze. Display updates must happen fast enough to prevent the user from noticing them. Saccadic suppression helps hide these updates. The aim of this study was to investigate which image transformations are less perceptible and hence more applicable during saccadic suppression periods. We designed our experimental environments in Unity3D and used an Eyelink1000 to sample the participants' gaze in real time. Participants viewed 3D scenes in which the camera panned from left to right at a constant rotational velocity. During this motion they made a horizontal (rightward) or vertical (downward) saccade during which a sudden movement of the camera transformed the image of the scene. Camera movements were one of 6 translational and 4 rotational directions. Following the trial, participants indicated the direction of the change in a 2AFC task. Discrimination thresholds for each type of transformation were estimated using an adaptive procedure to fit a Weibull psychometric function. During both horizontal and vertical saccades, thresholds were higher for horizontal translational and rotational camera movements than for other transformations. Further experiments are being conducted to determine if this generalizes but the current results imply that the direction of camera motion affects the detectability of camera transitions during saccades. Understanding the relationship between on-going movements and the detectability of a sudden transsaccadic change can help provide a better user experience for users of VR that hide graphical updates when they generate a saccade.}, annote = {18-22 August 2019 Alicante Spain}, author = {Keyvanara, Maryam and Allison, Robert}, booktitle = {Proceedings of 20th European Conference on Eye Movements. Journal of Eye Movement Research}, date-added = {2019-08-14 08:42:45 -0400}, date-modified = {2019-12-25 22:28:04 -0500}, journal = {Journal of Eye Movement Research}, keywords = {Eye Movements & Tracking}, number = {7}, pages = {214}, title = {Viewers' Sensitivity to Camera Motion during Saccades in a Virtual Environment}, url-1 = {https://doi.org/10.1177/0301006618824879}, volume = {12}, year = {2019}}
@incollection{Mohona:2019aa, annote = {International Conference on Predictive Vision, June 10-13, 2019, New Student Centre, York University}, author = {Mohona, S. and Wilcox, L. M. and Allison, R. S.}, booktitle = {Predictive Vision}, date-added = {2019-06-15 16:30:42 -0400}, date-modified = {2019-06-15 16:31:12 -0400}, keywords = {Stereopsis}, publisher = {CVR Conference, June 10-13, 2019, Toronto, Canada}, title = {Subjective Assessment of Image Compression Artefacts in 2D viewing versus Stereoscopic Viewing}, year = {2019}}
@incollection{Tong:2019aa, annote = {International Conference on Predictive Vision, June 10-13, 2019, New Student Centre, York University}, author = {Tong, J. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Predictive Vision}, date-added = {2019-06-15 16:30:42 -0400}, date-modified = {2019-06-15 16:31:22 -0400}, keywords = {Stereopsis}, publisher = {CVR Conference, June 10-13, 2019, Toronto, Canada}, title = {Slant perception in the presence of radial distortions}, year = {2019}}
@incollection{Kio:2019aa, annote = {International Conference on Predictive Vision, June 10-13, 2019, New Student Centre, York University}, author = {Kio, O. G. and Fujii, Y. and Au, D. and Wilcox, L. M. and Allison, R. S.}, booktitle = {Predictive Vision}, date-added = {2019-06-15 16:30:42 -0400}, date-modified = {2019-06-15 16:31:51 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, publisher = {CVR Conference, June 10-13, 2019, Toronto, Canada}, title = {Effects of stereoscopic 3D movie parameters on vection and postural sway}, year = {2019}}
@incollection{Au:2019aa, annote = {International Conference on Predictive Vision, June 10-13, 2019, New Student Centre, York University}, author = {Au, D. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Predictive Vision}, date-added = {2019-06-15 16:30:42 -0400}, date-modified = {2019-06-15 16:32:11 -0400}, keywords = {Stereopsis}, publisher = {CVR Conference, June 10-13, 2019, Toronto, Canada}, title = {Depth perception from monocular self-occlusions}, year = {2019}}
@incollection{keyvanara2019saccadic, abstract = {During a saccade, the image of the environment moves rapidly across the retina. However, due to saccadic suppression, our perception of motion is attenuated and our visual sensitivity is suppressed. We explored whether the extent of saccadic suppression depends on the type of image transformation. Participants observed three-dimensional scenes and made a vertical or horizontal saccade to follow a target object. During this saccade, the entire scene was translated or rotated along one of the canonical directions. After each trial, participants indicated the direction of the scene change in a forced-choice task. Change detectability depended on the magnitude and type of transformation. During vertical or horizontal saccades, users were least sensitive to vertical or horizontal translations, respectively, and most sensitive to rotations along the roll axis. We conclude that saccadic suppression affects natural image transformations that occur during whole body and head movement through our environment.}, annote = {Trieste Italy August 2018}, author = {Keyvanara, Maryam and Allison, Robert}, booktitle = {Proceedings of ECVP 2018, Perception}, date-modified = {2019-06-15 11:50:01 -0400}, doi = {10.1177/0301006618824879}, keywords = {Eye Movements & Tracking}, number = {S1}, pages = {71--71}, title = {Saccadic Suppression of Natural Image Transformations}, volume = {48}, year = {2019}, url-1 = {https://doi.org/10.1177/0301006618824879}}
@incollection{Allison:2019hw, annote = {Monday-Wednesday 14-16 January 2019, Hyatt Regency San Francisco Airport Hotel, Burlingame, California, USA.}, author = {Allison, R. S. and Wilcox, L. M.}, booktitle = {Electronic Imaging: Stereoscopic Displays and Applications 2019}, date-added = {2019-05-27 09:51:58 -0400}, date-modified = {2019-06-15 16:36:55 -0400}, keywords = {Stereopsis}, pages = {SD\&A-627}, title = {Stereoscopic capture and viewing parameters: Geometry and perception (Invited)}, year = {2019}}
@incollection{Hartle:2019aa, abstract = {Introduction: The relevance of stereopsis as a medical selection variable for aviators is a longstanding question in aviation medicine. In a prior study we observed superior altitude estimation when subjects viewed simulated terrain images in stereoscopic 3D compared to monocular viewing, thus supporting the relevance of stereopsis to aviation (Deas et al., AsMA 2016). However, in that study we used undergraduates as the subject population. Professional aviators undergo rigorous selection and training that may enhance their use of specific depth cues during altitude estimation. The present study investigated this possibility by directly comparing the performance of military aviators and undergraduates in the estimation of simulated altitude under binocular and monocular viewing conditions. Methods: Thirty-one trained military rotary-wing aircrew and thirty undergraduate observers participated in the experiment. Stimuli consisted of four high-resolution terrain images depicting a virtual helicopter skid above a ground plane, simulating a low hover scenario. The rendered altitude of the skid varied from zero to five feet. Observers were asked to judge the relative distance between the skid and the ground plane under binocular (using active shutter glasses) and monocular (wearing an eye patch) viewing conditions. Results: The aviators significantly outperformed the undergraduates in the monocular viewing condition, though for both groups monocular altitude estimates were less accurate than binocular estimates. During binocular viewing, both groups tended to make accurate altitude estimates; there was no evidence that the aviators were superior to undergraduates when binocular cues were available. Discussion: The finding of superior performance for aviators compared to undergraduates during monocular viewing is consistent with the hypothesis that selection and experience can enhance the use of monocular depth cues. However, the aviators performed similarly to undergraduates during binocular viewing and both groups were shown to benefit from binocular viewing compared to monocular viewing, suggesting that stereopsis contributes in the same manner to rotary-wing altitude estimation regardless of aviation experience. Future work might seek to extend these findings to more natural viewing conditions and link individual differences in stereopsis to altitude estimation performance. Learning Objective 1) Learn about the relevance of stereopsis to altitude estimation for rotary wing aviators. MOC Questions 1) Stereopsis is based on the processing of binocular cues (T/F). T 2) Altitude can be estimated based on monocular cues (T/F). T }, annote = {Hartle, B., Sudhama, A., Deas, L. M., Allison, R. S., Irving, E. L., Glaholt, M. G., \& Wilcox, L. M. Aviation experience and the role of stereopsis in altitude estimation. Presented to the 90th Annual Meeting of the Aerospace Medical Association (AsMA), Las Vegas, Nevada, USA. May 6, 2019.}, author = {Hartle, B. and Sudhama, A. and Deas, L. M. and Allison, R. S. and Irving, E. L. and Glaholt, M. and Wilcox, L. M.}, booktitle = {90th Annual Meeting of the Aerospace Medical Association (AsMA), Las Vegas, Nevada, USA.}, date-added = {2019-05-27 09:42:04 -0400}, date-modified = {2019-05-27 09:42:04 -0400}, keywords = {Stereopsis}, title = {Aviation experience and the role of stereopsis in rotary-wing altitude estimation}, year = {2019}}
@inproceedings{Keyvanara:2019aa, abstract = {In gaze-contingent displays, the viewer's eye movement data are processed in real-time to adjust the graphical content. To provide a high-quality user experience, these graphical updates must occur with minimum delay. Such updates can be used to introduce imperceptible changes in virtual camera pose in applications such as networked gaming, collaborative virtual reality and redirected walking. For such applications, perceptual saccadic suppression can help to hide the graphical artifacts. We investigated whether the visibility of these updates depends on the type of image transformation. Users viewed 3D scenes in which the displacement of a target object triggered them to generate a vertical or horizontal saccade, during which a translation or rotation was applied to the virtual camera used to render the scene. After each trial, users indicated the direction of the scene change in a forced-choice task. Results show that type and size of the image transformation affected change detectability. During horizontal or vertical saccades, rotations along the roll axis were the most detectable, while horizontal and vertical translations were least noticed. We confirm that large 3D adjustments to the scene viewpoint can be introduced unobtrusively and with low latency during saccades, but the allowable extent of the correction varies with the transformation applied.}, annote = {SAP '19, September 19--20, 2019, Barcelona, Spain}, author = {Keyvanara, Maryam and Allison, R. S.}, booktitle = {ACM Symposium on Applied Perception (SAP '19), September 19--20, 2019, Barcelona, Spain}, date-added = {2019-07-08 22:01:44 -0400}, date-modified = {2020-01-20 09:41:13 -0500}, doi = {10.1145/3343036.3343121}, keywords = {Eye Movements & Tracking}, pages = {Article 19, 1-9}, title = {Transsaccadic Awareness of Scene Transformations in a 3D Virtual Environment}, year = {2019}, url-1 = {https://doi.org/10.1145/3343036.3343121}}
@inproceedings{Au:aa, author = {Au, D. and Mohona, S. and Cutone, M. D. and Hou, Y. and Goel, J. and Jacobson, N. and Allison, R. S. and Wilcox, L. M.}, booktitle = {SID Symposium Digest of Technical Papers}, date-added = {2019-06-05 21:44:21 -0400}, date-modified = {2019-06-15 11:24:05 -0400}, doi = {10.1002/sdtp.12843}, keywords = {Image Quality}, number = {1}, pages = {13-16}, title = {3-4: Stereoscopic Image Quality Assessment}, url-1 = {https://doi.org/10.1002/sdtp.12843}, volume = {50}, year = {2019}, url-1 = {https://doi.org/10.1002/sdtp.12843}}
@techreport{Sudhama:2019aa, abstract = {Stereopsis is not currently a visual requirement for aircrew in the Royal Canadian Air Force; however, it has been shown to be relevant to some aviation manoeuvers, particularly aerial refueling and landing. Commercial tests of stereoacuity are widely used to assess stereopsis in clinical practice but may not predict performance in real-world scenarios and tasks. In this series of experiments, we have made the first steps towards development of a stereoscopic depth discrimination task using naturalistic stimuli and a task (terrain relief judgement) that is relevant to flight crew. Stimuli consist of a stereoscopically rendered grassy terrain with a central mound or a dip with varying height/depth. We measured thresholds for discrimination of the direction of the depth offset. For comparison and validation of our Terrain test we also measured observers' performance on a set of commercial (Randot, StereoFly) and purpose-designed stereoacuity tests: the Ledge test, the Bar test, and the United States Air Force School of Aerospace Medicine (USAFSAM) Operational-based Vision Assessment (OBVA) Ring stereo test. To assess the impact of uninformative 2D shading cues on depth judgements in our Terrain test, we manipulated the intensity of the shading (low and high). Our results show that the Terrain test can be used as a test for stereovision, and thresholds are measurable for most observers in the low shading condition. However, as shading is intensified, a large proportion of observers (30\%) exhibit a strong convexity bias, resulting in reversals in perceived depth. Although the test is a promising measure of stereo, the bias tends to erode its usefulness in this regard. Currently, our analyses show weak correlation between thresholds obtained using our Terrain test and the other stereoacuity tests. However, this is possibly due to the narrow range of, primarily low, thresholds in this set of observers and additional testing with individuals with a broader range of stereoscopic ability is required.}, author = {Sudhama, Aishwarya and Hartle, Brittney and Allison, Robert S. and Irving, Elizabeth L. and Wilcox, Laurie M.}, date-added = {2018-12-24 13:50:07 -0500}, date-modified = {2019-09-27 11:02:02 -0400}, institution = {Defence Research and Development Canada}, keywords = {Stereopsis}, number = {DRDC-RDDC-2019-C119}, title = {Estimates of simulated ground relief as an operational test of stereoacuity for aviators}, year = {2019}}
@article{Allison:2018fr, abstract = {Advances in imaging and display engineering have given rise to improved and new image and video applications which aim to maximize visual quality under given resource constraints (e.g., power, bandwidth). Because the human visual system is an imperfect sensor, the images/videos can be represented in a mathematically lossy fashion, but with enough fidelity that the losses are visually imperceptible---commonly termed ``visually lossless.'' Although a great deal of research has focused on gaining a better understanding of the limits of human vision when viewing natural images/video, a universally or even largely accepted definition of ``visually lossless'' remains elusive. Differences in testing methodologies, research objectives, and target applications have led to multiple ad-hoc definitions that are often difficult to compare to or otherwise employ in other settings. In this paper, we present a compendium of technical experiments relating to both vision science and visual quality testing that together explore the research and business perspectives of visually lossless image quality, as well as review recent scientific advances. Together, the studies presented in this paper suggest that a single definition of visually lossless quality might not be appropriate; rather, a better goal would be to establish varying levels of visually lossless quality that can be quantified in terms of the testing paradigm.}, annote = {Paper Number: JEI 170771P}, author = {Allison, R. S. and Brunnstr\"om, K. and Chandler, D. M. and Colett, H. and Corriveau, P. and Daly, S. and Goel, J. and Knopf, J. and Wilcox, L. M. and Yaacob, Y. and Yang, S. and Zhang, Y.}, date-added = {2018-09-12 14:13:00 +0000}, date-modified = {2019-02-03 08:55:51 -0500}, doi = {10.1117/1.JEI.27.5.053035}, journal = {Journal of Electronic Imaging}, keywords = {Image Quality}, number = {5}, pages = {053035}, title = {Perspectives on the definition of visually lossless for mobile and large format displays}, volume = {27}, year = {2018}, url-1 = {https://doi.org/10.1117/1.JEI.27.5.053035}}
@article{Fujii:aa, author = {Fujii, Y. and Seno, T. and Allison, R. S.}, date-added = {2017-11-04 23:09:56 +0000}, date-modified = {2018-03-19 12:55:01 +0000}, doi = {10.1007/s00221-017-5122-1}, journal = {Experimental Brain Research}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {1}, pages = {243--252}, title = {Smoothness of stimulus motion can affect vection strength}, volume = {236}, year = {2018}, url-1 = {https://doi.org/10.1007/s00221-017-5122-1}}
@incollection{Harris:2018aa, abstract = {Physical linear acceleration experienced in micro-g can be misinterpreted as gravity and induce a sense of tilt (Cl\'ement et al. 2001. EBR 138: 410). Previous studies have suggested that visual acceleration may be more effective at inducing self-motion (vection) in micro-g and furthermore that perceived distance may be underestimated in these environments. In the micro-g of the ISS, the VECTION project will: (1) quantify how much self-motion is evoked by visual acceleration; (2) investigate whether visual acceleration can be interpreted as gravity; and (3) exploit the size-distance invariance hypothesis to assess the perception of distance. Forwards vection will be created using constant-acceleration (0.8m/s/s) translation down a virtual corridor presented to astronauts using a head-mounted display (HMD). We will assess the perceived distance of travel by asking them to indicate when they reached a previously presented target. In a second experiment, lateral vection will be evoked by a period of sideways visual acceleration. Following the experience, the screen will be blanked. If this acceleration is interpreted as gravity, it will evoke a sense of tilt comparable to that found in the Cl\'ement et al. study. Perceived tilt will be assessed by aligning a line with the previously viewed floor of the simulated corridor. In a third experiment, distance perception will be measured by asking astronauts to compare the size of an object presented at a known simulated distance with a physical reference held in their hands. In all cases data will be compared to ground control data taken before each astronaut's mission. Control data will also be collected from an age- and gender-matched sample of na\"ive, earth-bound participants tested at approximately the same intervals as the astronauts. The on-orbit data will be collected between 2018 and 2021 although ground-based testing has already commenced. VECTION will significantly improve safety wherever movement under microgravity conditions is required.}, author = {Harris, L. R. and Jenkin, M. R. and Allison, R. S. and McManus, M. and Bury, N.}, booktitle = {CASI Aero 2018, Quebec, Canada May 15-17, 2018}, date-added = {2018-09-12 14:24:37 +0000}, date-modified = {2024-03-04 22:15:38 -0500}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {69-70}, title = {The CSA VECTION project: interpreting visual acceleration in a micro-g environment}, year = {2018}}
@inproceedings{zhao_effects_2018, abstract = {Recent research has proposed teleoperation of robotic and aerial vehicles using head motion tracked by a head-mounted display (HMD). First-person views of the vehicles are usually captured by onboard cameras and presented to users through the display panels of HMDs. This provides users with a direct, immersive and intuitive interface for viewing and control. However, a typically overlooked factor in such designs is the latency introduced by the vehicle dynamics. As head motion is coupled with visual updates in such applications, visual and control latency always exists between the issue of control commands by head movements and the visual feedback received at the completion of the attitude adjustment. This causes a discrepancy between the intended motion, the vestibular cue and the visual cue and may potentially result in simulator sickness. No research has been conducted on how various levels of visual and control latency introduced by dynamics in robots or aerial vehicles affect users' performance and the degree of simulator sickness elicited. Thus, it is uncertain how much performance is degraded by latency and whether such designs are comfortable from the perspective of users. To address these issues, we studied a prototyped scenario of a head motion controlled quadcopter using an HMD. We present a virtual reality (VR) paradigm to systematically assess the effects of visual and control latency in simulated drone control scenarios.}, author = {Zhao, J. and Allison, R. S. and Vinnikov, M. and Jennings, S.}, booktitle = {2018 {IEEE} {International} {Conference} on {Systems}, {Man}, and {Cybernetics} ({SMC})}, date-added = {2019-02-03 08:17:58 -0500}, date-modified = {2019-04-13 15:32:08 -0400}, doi = {10.1109/SMC.2018.00505}, keywords = {Augmented & Virtual Reality}, month = 10, pages = {2972--2979}, title = {The {Effects} of {Visual} and {Control} {Latency} on {Piloting} a {Quadcopter} {Using} a {Head}-{Mounted} {Display}}, url = {https://arxiv.org/abs/1807.11123}, year = {2018}, url-1 = {https://arxiv.org/abs/1807.11123}, url-2 = {https://doi.org/10.1109/SMC.2018.00505}}
@inproceedings{Keyvanara:2018:SNI:3204493.3204583, acmid = {3204583}, address = {New York, NY, USA}, author = {Keyvanara, Maryam and Allison, Robert S.}, booktitle = {Proceedings of the 2018 ACM Symposium on Eye Tracking Research \& Applications}, date-added = {2018-06-30 22:18:20 +0000}, date-modified = {2023-10-27 11:10:26 -0400}, doi = {10.1145/3204493.3204583}, isbn = {978-1-4503-5706-7}, keywords = {Eye Movements & Tracking, Augmented & Virtual Reality}, location = {Warsaw, Poland}, numpages = {5}, pages = {64:1--64:5}, publisher = {ACM}, series = {ETRA 18}, title = {Sensitivity to Natural 3D Image Transformations During Eye Movements}, url = {https://percept.eecs.yorku.ca/papers/Maryma ETRA_2018 preprint.pdf}, year = {2018}, url-1 = {http://doi.acm.org/10.1145/3204493.3204583}, url-2 = {https://doi.org/10.1145/3204493.3204583}}
@inproceedings{sudhama201885, author = {Sudhama, Aishwarya and Cutone, Matthew D and Hou, Yuqian and Goel, James and Stolitzka, Dale and Jacobson, Natan and Allison, Robert S and Wilcox, Laurie M}, booktitle = {SID Symposium Digest of Technical Papers}, date-modified = {2018-06-06 22:08:01 +0000}, doi = {10.1002/sdtp.12106}, keywords = {Image Quality}, number = {1}, pages = {1151--1154}, title = {85-1: Visually Lossless Compression of High Dynamic Range Images: A Large-Scale Evaluation}, url-1 = {https://doi.org/10.1002/sdtp.12106}, volume = {49}, year = {2018}, url-1 = {https://doi.org/10.1002/sdtp.12106}}
@inproceedings{cutone2018p, author = {Cutone, Matthew D and Dalecki, Marc and Goel, James and Wilcox, Laurie M and Allison, Robert S}, booktitle = {SID Symposium Digest of Technical Papers}, date-modified = {2018-06-06 22:08:46 +0000}, doi = {10.1002/sdtp.12154}, keywords = {Image Quality}, number = {1}, pages = {1312--1314}, title = {P-31: A Statistical Paradigm for Assessment of Subjective Image Quality Results}, url-1 = {https://doi.org/10.1002/sdtp.12154}, volume = {49}, year = {2018}, url-1 = {https://doi.org/10.1002/sdtp.12154}}
@inproceedings{Zhao:2016ab, abstract = {Mechanical repositioning is a locomotion technique that uses a mechanical device (i.e. locomotion interface), such as treadmills and pedaling devices, to cancel the displacement of a user for walking on the spot. This technique is especially useful for virtual reality (VR) systems that use large-scale projective displays for visualization. In this paper, we present a machine learning approach for developing a mechanical repositioning technique based on a 1-D treadmill for interacting with a unique new large-scale projective display, named the Wide-Field Immersive Stereoscopic Environment (WISE). We also assessed the usability of the proposed approach through a novel user study that asked participants to pursue a rolling ball at variable speed in a virtual scene. Our results show that participants differ in their ability to carry out the task. We provide an explanation for the variable performance of the participants based on the locomotion technique.}, annote = {2nd International Workshop on Understanding Human Activities through 3D Sensors (UHA3DS'16) Dec 4, 2016, Mexico}, author = {Zhao, J. and Allison, R. S.}, booktitle = {Understanding Human Activities Through 3D Sensors. UHA3DS 2016.}, date-added = {2016-12-04 21:58:08 +0000}, date-modified = {2018-05-25 00:29:53 +0000}, doi = {10.1007/978-3-319-91863-1_5}, editor = {Hazem Wannous and Pietro Pala and Mohamed Daoudi and Francisco Fl{\'o}rez-Revuelta}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {59-73}, series = {Lecture Notes in Computer Science}, title = {Learning Gait Parameters for Locomotion in Virtual Reality Systems}, volume = {10188}, year = {2018}, url-1 = {https://doi.org/10.1007/978-3-319-91863-1_5}}
@misc{Hosale:2018aa, abstract = {ICECUBE LED Display [ILDm\^3] is a cubic-meter (1/1000th scale) model of the IceCube Neutrino Observatory. This novel telescope looks for nearly invisible cosmic messengers, neutrinos, using a cubic-kilometer of instrumented ice starting 1450 meters below the surface at the South Pole. ILDm\^3 sits low to the ground on a base of wood that supports 86 acrylic rods, each with 60 vertically arranged full-colour LEDs (5,160 total). Each LED is a representation of a sensor, a.k.a. Digital Optical Module (DOM), in the South Pole array. A small interactive interface is used to control the work. Spatial sonification (sound mapping) of the data enhances the representation of events on the model, allowing observers to audio-locate events, as well as see them. This is a second, smaller version of an eight cubic meter (1:500 scale) display we developed previously. Through these projects we attempt to create a high quality experience of the information being presented with the goal of knowledge dissemination and the ability to convey an intuitive understanding and the experience of data. The models represent an epistemological nexus between art and science. While scientifically precise, the display uses art methodologies as an optimal means for expressing imperceptible astrophysical events as sound, light and colour in the domain of the human sensorium. The result is an experience that is as aesthetically critical as it is facilitatory to an intuitive understanding of sub-atomic astrophysical data, leading to new ways of knowing about our Universe and its processes. For more info visit: http://icecube.wisc.edu/ ndstudiolab.com/projects/icecube/}, author = {Hosale, M. D. and Madsen, J. and Allison, R. S.}, date-added = {2018-02-14 03:03:53 +0000}, date-modified = {2019-02-03 08:35:07 -0500}, howpublished = {Art Exhibit (Exhibition Catalog) at Colour: what do you mean by that?}, keywords = {Misc.}, month = {03}, title = {ICECUBE LED Display}, year = {2018}}
@article{Kirollos:sf, abstract = {Behavioural studies have consistently found stronger vection responses for oscillating, compared to smooth/constant, patterns of radial flow (the simulated viewpoint oscillation advantage for vection). Traditional accounts predict that simulated viewpoint oscillation should impair vection by increasing visual--vestibular conflicts in stationary observers (as this visual oscillation simulates self-accelerations that should strongly stimulate the vestibular apparatus). However, support for increased vestibular activity during accelerating vection has been mixed in the brain imaging literature. This fMRI study examined BOLD activity in visual (cingulate sulcus visual area --- CSv; medial temporal complex --- MT+; V6; precuneus motion area --- PcM) and vestibular regions (parieto-insular vestibular cortex --- PIVC/posterior insular cortex --- PIC; ventral intraparietal region --- VIP) when stationary observers were exposed to vection-inducing optic flow (i.e., globally coherent oscillating and smooth self-motion displays) as well as two suitable control displays. In line with earlier studies in which no vection occurred, CSv and PIVC/PIC both showed significantly increased BOLD activity during oscillating global motion compared to the other motion conditions (although this effect was found for fewer subjects in PIVC/PIC). The increase in BOLD activity in PIVC/PIC during prolonged exposure to the oscillating (compared to smooth) patterns of global optical flow appears consistent with vestibular facilitation.}, author = {Kirollos, R. and Allison, R. S. and Palmisano, S. A.}, date-added = {2017-06-26 22:02:24 +0000}, date-modified = {2018-01-02 15:42:33 +0000}, doi = {10.1163/22134808-00002593}, journal = {Multisensory Research}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {7-8}, pages = {739-761}, title = {Cortical Correlates of the Simulated Viewpoint Oscillation Advantage for Vection}, volume = {30}, year = {2017}, url-1 = {https://doi.org/10.1163/22134808-00002593}}
@article{Ritvo:hc, abstract = {Given the increasing variety, affordability and accessibility of sophisticated computer technologies, particularly webcam technologies, touchscreen interfaces, and wearables, there has been a marked increase in the application of such technology to enrich the lives of nonhuman animals (NHAs) and to study their cognitive abilities. However, the anthropocentric design of current computer systems is a barrier for successful adoption of such technologies by NHA users. NHA factors have not driven the design of the majority of computer technologies that have been, or could be, applied to this user population. This paper explores (a) how human-computer interaction (HCI) principles may apply (or not apply) to NHA-computer interaction (ACI), (b) how principles and computer system designs exclusive for ACI may be developed, and (c) how NHA-centered computer designs may benefit HCI and its user population.}, author = {Ritvo, S. E. and Allison, R. S.}, date-added = {2016-12-30 04:24:15 +0000}, date-modified = {2017-04-24 21:22:44 +0000}, doi = {10.1016/j.chb.2016.12.062}, journal = {Computers in Human Behavior}, keywords = {Misc.}, pages = {222-233}, title = {Designing for the exceptional user: Nonhuman animal-computer interaction ({ACI})}, volume = {70}, year = {2017}, url-1 = {https://doi.org/10.1016/j.chb.2016.12.062}}
@article{Vinnikov:sf, abstract = {Virtual reality simulations of group social interactions are important for many applications including the virtual treatment of social phobias, crowd and group simulation, collaborative virtual environments and entertainment. In such scenarios, when compared to the real world, audio cues are often impoverished. As a result, users cannot rely on subtle spatial audio-visual cues that guide attention and enable effective social interactions in real world situations. We explored whether gaze-contingent audio enhancement techniques driven by inferring audio-visual attention in virtual displays could be used to enable effective communication in cluttered audio virtual environments. In all of our experiments, we hypothesized that visual attention could be used as a tool to modulate the quality and intensity of sounds from multiple sources to efficiently and naturally select spatial sound sources. For this purpose, we built a gaze-contingent display that allowed tracking of a user's gaze in real-time and modifying the volume of the speakers' voices contingent on the current region of overt attention. We compared six different techniques for sound modulation with a base condition providing no attentional modulation of sound. The techniques were compared in terms of source recognition and preference in a set of user studies. Overall, we observed that users liked the ability to control the sounds with their eyes. They felt that a rapid change in attenuation with attention but not the elimination of competing sounds (partial rather than absolute selection) was most natural. In conclusion, audio gaze-contingent displays offer potential for simulating rich, natural social and other interactions in virtual environments. They should be considered for improving both performance and fidelity in applications related to social behaviour scenarios or when the user needs to work with multiple audio sources of information.}, author = {Vinnikov, M. and Allison, R. S. and Fernandes, S.}, booktitle = {ACM CHI}, date-added = {2016-12-30 04:23:15 +0000}, date-modified = {2017-05-08 02:05:25 +0000}, doi = {10.1145/3067822}, journal = {{ACM TOCHI}}, keywords = {Eye Movements & Tracking}, number = {3}, pages = {19.1-19.38}, title = {Gaze-contingent Auditory Displays for Improved Spatial Attention}, volume = {24}, year = {2017}, url-1 = {https://doi.org/10.1145/3067822}}
@incollection{Allison:vn, abstract = {Modern digital cinema supports much higher frame rates (HFR) than the traditional 24 frames per second (fps). Theoretically, higher fidelity should allow viewers to see more detail. We filmed image sequences of a male and female actor (in different costume) at all combinations of two resolutions (2k and 4k), three frame rates (24, 48 and 60 fps), and two shutter angles (180\degree and 358\degree). We asked viewers (N = 26) to watch 20-s movie clips and to rate (1) the image sharpness and (2) the quality of the motion. Motion quality and image sharpness ratings improved with increasing frame rate, especially from 24 to 48 fps. The ratings of sharpness for 180\degree shutter angle were higher than for 358\degree, consistent with the expectation of more motion blur in the latter. The benefit of higher resolution depended on frame rate: at 24 fps, ratings of sharpness for the 4k sequences were similar to, or even lower than, ratings for the 2k sequences. We propose that motion blur was more apparent in the low frame rate 4k imagery because it could be compared with high resolution, static, portions of the same image. Our results show that na\"ive observers perceive enhanced detail in moving fabrics and costumes in HFR film. This improved perception of detail could underlie both the positive and negative reactions to HFR film, depending on the nature of the content and whether it lends itself to such high fidelity.}, annote = {ECVP 2017 Berlin, Germany. Poster/Talk presented at the European Conference on Visual Perception 2017, Berlin, Germany. Retrieved from URL: http://journals.sagepub.com/page/pec/collections/ecvp-abstracts/index/ecvp-2017}, author = {Allison, R. S. and Fujii, Y. and Wilcox, L. M.}, booktitle = {Proceedings of ECVP 2017, Perception}, date-added = {2018-11-27 12:57:16 -0500}, date-modified = {2019-02-03 09:00:21 -0500}, keywords = {Image Quality}, pages = {111}, title = {Effects of motion picture frame rate on image quality}, url = {http://journals.sagepub.com/page/pec/collections/ecvp-abstracts/index/ecvp-2017}, year = {2017}, url-1 = {http://journals.sagepub.com/page/pec/collections/ecvp-abstracts/index/ecvp-2017}}
@incollection{Fujii:yb, abstract = {We examined the effect of frame rate of optical flow on vection strength. Downward (Experiment 1) and expanding (Experiment 2) grating movies were used as stimuli to induce vection. Frame rate was controlled in seven conditions (3, 4, 6, 12, 20, 30 and 60 fps), and vection strength was measured with three indices (latency of vection onset, total duration time and subjective magnitude). We hypothesized that higher frame rates should induce stronger vection because low frame rates cause artifacts such as judder and motion blur. The results of both experiments clearly showed that vection strength increased with increasing frame rate; however, the rate of increase was not constant and saturated in the high range.}, annote = {Fukuoka}, author = {Fujii, Y. and Seno, T. and Allison, R. S.}, booktitle = {Fechner Day 2017 Conference Proceedings, The 33rd Annual Meeting of the International Society for Psychophysics, 22-26 October 2017, Fukuoka, Japan}, date-added = {2018-09-18 10:09:52 -0400}, date-modified = {2018-09-18 10:09:52 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {242}, title = {Effect of frame rate on vection strength}, volume = {33}, year = {2017}}
@incollection{Kio:aa, abstract = {The quality of stereoscopic 3D content is a major determinant for immersion and user experience in virtual reality (VR). Thus it is important that the effectiveness of stereoscopic 3D content parameters be assessed behaviourally. A typical behavioural response to VR is vection, the visually-induced perception of self-motion elicited by moving scenes. In this work we investigate how participants' vection and postural sway vary with the simulated optical flow speed and the virtual camera's frame rate and exposure time while viewing depictions of movement through a realistic virtual environment. We compare the degree of postural sway obtained from the centre-of-pressure data of a Nintendo Wii Balance Board with subjective vection scores. Results obtained from this study show that average perceived vection increases with increasing frame rate and simulated speed but not with exposure time. We also found that perceived vection in VR does not induce significant postural sway in typical 3D cinema scenarios. We are currently conducting experiments to confirm whether this finding holds for immersive virtual reality scenarios where screen edge and other surround cues are eliminated. }, annote = {6th International Conference on Visually Induced Motion Sensations, Nov 16-17, 2017, Toronto, Canada}, author = {Kio, O. G. and Fujii, Y. and Wilcox, L. M. and Au, D. and Allison, R. S.}, booktitle = {6th International Conference on Visually induced Motion Sensations}, date-added = {2018-04-22 12:33:00 +0000}, date-modified = {2018-04-22 12:33:00 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {13}, title = {Perceived Vection and Postural Sway: A Behavioural Response to Virtual Reality}, url = {https://vims2017.org/program-1}, url-1 = {https://vims2017.org/program-1}, year = {2017}}
@incollection{Allison:aa, author = {Allison, R. S. and Fujii, Y. and Wilcox, L. M.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2017-09-04 13:17:48 +0000}, date-modified = {2017-11-27 01:07:18 +0000}, doi = {10.1167/17.10.418}, keywords = {Image Quality}, number = {10}, pages = {418}, title = {Effects of motion picture frame rate on material and texture appearance}, volume = {17}, year = {2017}, url-1 = {https://doi.org/10.1167/17.10.418}}
@incollection{Hartle:2017ab, abstract = {The potential advantages of stereopsis for aircrew have long been of interest in aviation, but there is no clear consensus on its impact. One potential reason for this is the redundancy of monocular and binocular sources of depth information in natural environments. In a previous study, we assessed the impact of stereopsis on distance judgements during simulated helicopter low hover with the observer looking out of a helicopter, past the skid, to the ground plane. Specifically, we assessed relative distance estimation under stereoscopic (S3D) and monocular viewing conditions. We varied the availability of monocular depth cues by rendering the ground with four types of terrain. On each trial observers (n=14) were asked to estimate the distance between the skid and the ground plane relative to the distance from themselves to the skid. Our results showed that performance was more accurate in S3D than monocular conditions. Furthermore, under monocular viewing conditions, observers scaled their estimates with distance, but tended to strongly underestimate relative to predictions. These results support the hypothesis that stereopsis facilitates judgement of relative distance in low hover. However, the above results could reflect the fact that our observers were inexperienced. Task-specific training that aircrew receive could diminish potential benefits of stereopsis. In the current study, we replicated the previous experiment using trained aircrew (n=32). Aircrew demonstrated higher accuracy in the monocular conditions relative to inexperienced observers, though the benefit of binocular viewing was still observed. Interestingly, when monocular information was unreliable, judgements made by aircrew were less precise than in conditions with reliable monocular cues. Overall, the presence of stereopsis improved accuracy of relative distance judgements in low hover for inexperienced observers and trained aircrew. Despite higher accuracy in monocular conditions for aircrew, their estimates were affected by unreliable monocular information while inexperienced observers were not. These results highlight the impact of task-specific training on the accuracy of depth judgements in rotary wing flight operations. }, annote = {York June 2017}, author = {Hartle, B. and Deas, L. M. and Allison, R. S. and Irving, E. L. and Glaholt, M. and Wilcox, L. M.}, booktitle = {Centre for Vision Research International Conference on Vision in the Real World}, date-added = {2017-09-03 18:22:12 +0000}, date-modified = {2017-09-03 18:22:39 +0000}, keywords = {Stereopsis}, title = {Does Task-Specific Experience Improve Altitude Estimation in Virtual Environments?}, year = {2017}}
@incollection{Hartle:2017aa, abstract = {The contribution of stereopsis to aviation has long been a topic of interest, but there is no consensus on its impact. This is in part due to the diversity of methodologies and tasks used, but also reflects the availability of monocular depth cues that can support altitude estimation. Here, we evaluated the contribution of monocular and binocular depth cues to altitude estimation in a simulated low hover task commonly performed by helicopter aircrew. Using a stereoscopic display, trained aircrew (n=32) estimated the altitude from a skid to the ground plane under stereoscopic and monocular viewing conditions. The ground plane was rendered with four textures at a range of altitudes. Altitude estimation was more accurate in stereoscopic than monocular conditions. Under monocular viewing, observers scaled their estimates with distance, but substantially underestimated the amount of depth. Comparison of these results with those obtained using na{\"\i}ve observers (Deas et al., in press) showed that the aircrew were more accurate in monocular test conditions than na{\"\i}ve observers, but the performance of both groups was significantly improved when stereoscopic depth information was available. This pattern of results suggests that while aircrew can learn to capitalize on monocular depth cues for specific in-flight tasks, stereopsis makes a substantial contribution to operational performance for rotary wing altitude estimation.}, annote = {2017 OSA FVM Annual Meeting in Washington, DC}, author = {Hartle, B. and Deas, L. M. and Allison, R. S. and Irving, E. L. and Glaholt, M. and Wilcox, L. M.}, booktitle = {Journal of Vision (2017 OSA Fall Vision Meeting Annual Meeting Abstracts)}, date-added = {2017-09-03 18:16:36 +0000}, date-modified = {2019-02-03 09:10:06 -0500}, doi = {10.1167/17.15.42}, keywords = {Stereopsis}, number = {15}, pages = {42}, title = {The contribution of monocular and binocular cues to altitude estimation in aircrew}, url = {http://jov.arvojournals.org/article.aspx?articleid=2667401}, url-1 = {http://jov.arvojournals.org/article.aspx?articleid=2667401}, url-2 = {https://doi.org/10.1167/17.15.42}, volume = {17}, year = {2017}}
@incollection{Deas:fk, abstract = {INTRODUCTION: Stereopsis is the ability to perceive depth based on binocular disparity and is believed to be important for certain aircrew tasks. For example, functional stereoscopic vision has been shown to provide an advantage for boom operators during certain aerial refueling scenarios. We propose that stereopsis will aid depth estimation in rotary-wing hover maneuvers. To test this hypothesis, we assessed performance on a distance estimation task under stereoscopic (S3D) and monocular (2D) viewing conditions. METHODS: Four types of S3D still images (3 terrains, one control pattern without 2D cues) were simulated from the point of view of a Flight Engineer looking downward (45\degree) out a helicopter door. The end of a helicopter skid was visible and provided a consistent reference point in all images. Test altitudes from the skid to the ground ranged from 0-5ft with a 2'' step size. Observers (n=14, 7 female) estimated the distance between the skid and the ground. To do this, they assigned a value to represent the distance between their head position and the skid, and judged the distance from the skid to the ground relative to that value. All observers participated in S3D and 2D viewing conditions. Normalized data were analyzed using a linear mixed-effects model with full maximum-likelihood estimation methods. RESULTS: Estimates of relative distance were significantly affected by the viewing mode: performance was significantly more accurate in the S3D than in the 2D conditions. When terrains were viewed monocularly, observers did scale their estimates with distance, but were well below expected values. DISCUSSION: These results support the hypothesis that stereopsis facilitates judgements of relative distance in simulated low hover scenarios. Future experiments will determine if the advantage afforded by stereopsis remains at larger distances (high hover), and if it is maintained when additional 2D information (e.g. relative size) is available. Learning Objectives: 1. The participant will learn about operational requirements for stereoscopic depth perception in the context of rotary wing operations.}, annote = {Denver CO - May 1-4, 2017}, author = {Deas, L. and Allison, R. S. and Hartle, B. and Irving, E. L. and Glaholt, M. and Wilcox, L. M.}, booktitle = {Aerospace Medical Association Annual Scientific Meeting, Aerospace Medicine and Human Performance, Vol. 88, No. 3 March 2017}, date-added = {2017-06-08 17:45:55 +0000}, date-modified = {2018-11-25 13:29:33 -0500}, keywords = {Stereopsis}, number = {3}, pages = {259-260}, title = {The impact of stereoscopic 3D depth cues on distance estimation in a simulated low hover scenario}, volume = {88}, year = {2017}}
@incollection{Cutone:2017aa, abstract = {The transmission of digital image content frequently involves some form of compression to reduce the demand and complexity of the communication medium. Image data is compressed via a codec, which removes information that is either redundant or largely imperceptible to reduce the bit-rate required to transmit the image to the target device. In so-called `lossy' compression, data from the original image signal cannot be completely recovered upon decoding, which can produce perceptible artifacts or noise. Psychophysical methods exist to assess artefact perceptibility and subjective preference following compression. A current industry standard (ISO/IEC-29170-2 Annex B) specifies a two-alternative forced choice procedure to measure artefact visibility. In this protocol, two versions of the same image are presented side-by-side on a display. In one location an original (reference) and compressed image are temporally interleaved, while in the other location the original is presented repeatedly. Detectable differences between the original and compressed images will appear as localized flicker and observers are asked to indicate which of the images appears to flicker. The recommended statistical procedures outlined in the standards document are descriptive and do not assess the relative performance between codecs. Here, we describe a statistical procedure that can be used to evaluate the relative performance of different codecs based on the ISO/IEC protocol results. }, annote = {Toronto June 2017}, author = {Cutone, M. D. and Wilcox, L. M. and Allison, R. S.}, booktitle = {Centre for Vision Research International Conference on Vision in the Real World}, date-added = {2017-06-08 17:40:28 +0000}, date-modified = {2017-09-03 18:24:59 +0000}, keywords = {Image Quality}, pages = {17}, title = {Statistical procedure for assessing the relative performance of codecs using the flicker paradigm}, year = {2017}}
@incollection{Kio:2017wf, annote = {Toronto June 2017}, author = {Kio, O. G. and Fujii, Y. and Wilcox, L. M. and Au, D. and Allison, R. S.}, booktitle = {Centre for Vision Research International Conference on Vision in the Real World}, date-added = {2017-06-08 17:40:28 +0000}, date-modified = {2017-09-03 18:24:50 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {24}, title = {Psychophysical response to virtual reality: Vection and postural sway}, year = {2017}}
@incollection{Aishwarya-Sudhama:2017jk, abstract = {Advances in high-dynamic range, wide-colour-gamut displays have created unparalleled opportunities for improving image quality, but have also driven rapid expansion of data bandwidth requirements. To meet these needs, there is increasing demand for low-impairment display stream compression (DSC). The goal of low-impairment DSC is to ensure that the final product meets demanding compression targets, while being perceptually identical to the original image. Objective approaches, based on error metrics, are useful to a point, but cannot reliably predict the visibility of artefacts near the limits of detection. Thus, subjective assessments are required to confirm that compression is visually lossless, a task that is made more complex by the fact that the benchmarks (e.g., what is visually lossless) are not well defined, and by a lack of theory linking these perceptual outcomes to objective error metrics. Subjective quality measures can be dramatically affected by choice of methodology, content and participant experience. Here we will discuss this issue in the context of our recent experiments in which we evaluated leading low-impairment algorithms using a common image set, and a side-by-side flicker detection paradigm (ISO/IEC 29170-2). In follow-up trials we evaluated these same codecs using a modified motion-based paradigm and show that in this more realistic viewing scenario, viewers are often less sensitive to compression-related artefacts. }, annote = {Toronto June 2017}, author = {Sudhama, A. and Deas, L. M. and Goel, J and Allison, R. S. and Wilcox, L. M.}, booktitle = {Centre for Vision Research International Conference on Vision in the Real World}, date-added = {2017-06-08 17:40:28 +0000}, date-modified = {2017-09-03 18:24:20 +0000}, keywords = {Image Quality}, pages = {33}, title = {Subjective evaluation of image quality}, year = {2017}}
@incollection{Zhao:2017aa, annote = {Toronto June 2017}, author = {Zhao, J. and Allison, R. S.}, booktitle = {Centre for Vision Research International Conference on Vision in the Real World}, date-added = {2017-06-08 17:40:28 +0000}, date-modified = {2017-09-03 18:25:26 +0000}, keywords = {Augmented & Virtual Reality}, pages = {39}, title = {Tolerance of Latency in Controlling a Quadcopter using a Head Mounted Display}, year = {2017}}
@incollection{Allison:yu, abstract = {State-of-the-art stereoscopic displays and virtual reality systems offer the promise of new immersive experiences. They also pose significant perceptual human factors challenges. We have been studying the sensitivity and tolerance of viewers to the key parameters content makers use to produce stereoscopic 3D media. These parameters potentially affect both the perception of depth in a 3D scene, and our sense of motion through it. I will review progress toward understanding when and how these artistic decisions impact a viewer's perceptual experience.}, author = {Allison, R. S.}, booktitle = {Centre for Vision Research International Conference on Vision in the Real World}, date-added = {2017-06-08 17:40:28 +0000}, date-modified = {2017-06-08 17:40:28 +0000}, keywords = {Stereopsis}, pages = {11}, title = {Perception in Stereoscopic 3D Media}, year = {2017}}
@incollection{Wilcox:aa, author = {Wilcox, L. M. and Allison, R. S. and Goel, J.}, booktitle = {Electronic Imaging: Human Vision and Electronic Imaging panel presentation}, date-added = {2017-04-14 15:16:12 +0000}, date-modified = {2017-04-14 15:16:12 +0000}, keywords = {Image Quality}, pages = {HVEI-129}, title = {Subjective assessment and the criteria for visually lossless compression (Invited)}, year = {2017}}
@inproceedings{zhao_real-time_2017, abstract = {Head gesture is a natural means of face-to-face communication between people but the recognition of head gestures in the context of virtual reality and the use of head gesture as an interface for interacting with virtual avatars and virtual environments have rarely been investigated. In the current study, we present an approach for real-time head gesture recognition on head-mounted displays using Cascaded Hidden Markov Models. We conducted two experiments to evaluate our proposed approach. In experiment 1, we trained the Cascaded Hidden Markov Models and assessed the offline classification performance using collected head motion data. In experiment 2, we characterized the real-time performance of the approach by estimating the latency to recognize a head gesture with recorded real-time classification data. Our results show that the proposed approach is effective in recognizing head gestures. The method can be integrated into a virtual reality system as a head gesture interface for interacting with virtual worlds.}, annote = {Oct 5-8, 2017 Banff, Canada}, author = {Zhao, J. and Allison, R. S.}, booktitle = {2017 {IEEE} {International} {Conference} on {Systems}, {Man}, and {Cybernetics} ({SMC})}, date-added = {2019-02-03 08:23:21 -0500}, date-modified = {2019-04-13 16:09:18 -0400}, doi = {10.1109/SMC.2017.8122975}, keywords = {Augmented & Virtual Reality}, month = 10, pages = {2361--2366}, title = {Real-time head gesture recognition on head-mounted displays using cascaded hidden {Markov} models}, url = {https://arxiv.org/abs/1707.06691}, url-1 = {https://doi.org/10.1109/SMC.2017.8122975}, year = {2017}}
@inproceedings{Allison:ac, abstract = { York University has a long history of research in the perception of self-motion and orientation using purpose-built apparatus. Recently we developed and installed new facilities including new, more capable versions of Ian Howard's tumbling room and sphere devices: (1) The wide field stereoscopic environment is a projected, computer-generated, virtual environment that completely fills the participant's visual field with edgeless, high-resolution imagery. (2) The new tumbling room allows for full 360 degree rotation of the observer or the visual environment with near perfect visual fidelity. The room walls, floor and ceiling can be removed allowing for locomotion in a cylindrical environment. (3) The sphere environment allows for presenting full-field visual motion displays in pitch, roll or yaw while in a wide range of postures with respect to gravity. This presentation will overview the capabilities and illusions elicited in these devices as well as experiments to cross-validate the devices. }, annote = {22-26 October 2017, Fukuoka, Japan}, author = {Allison, R. S. and Harris, L. R. and Jenkin, M. R. M.}, booktitle = {Fechner Day 2017 Conference Proceedings, The 33rd Annual Meeting of the International Society for Psychophysics, 22-26 October 2017, Fukuoka, Japan}, date-added = {2018-09-18 10:10:23 -0400}, date-modified = {2018-09-18 10:10:23 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {360-361}, title = {Self-motion perception facilities at York University}, volume = {33}, year = {2017}}
@inproceedings{deas_estimation_2017, abstract = {Research on the role of human stereopsis has largely focused on laboratory studies that control or eliminate other cues to depth. However, in everyday environments we rarely rely on a single source of depth information. Despite this, few studies have assessed the impact of binocular vision on depth judgements in real-world scenarios presented in simulation. Here we conducted a series of experiments to determine if, and to what extent, stereoscopic depth provides a benefit for tasks commonly performed by helicopter aircrew. We assessed the impact of binocular vision and stereopsis on perception of (1) relative and (2) absolute distance above the ground (altitude) using natural and simulated stereoscopic-3D (S3D) imagery. The results showed that, consistent with the literature, binocular vision provides very weak input to absolute altitude estimates at high altitudes (10-100ft). In contrast, estimates of relative altitude at low altitudes (0-5ft) were critically dependent on stereopsis, irrespective of terrain type. These findings are consistent with the view that stereopsis provides important information for altitude judgments when close to the ground; while at high altitudes these judgments are based primarily on the perception of 2D cues.}, annote = {Winner of Best Paper (stereoscopic presentation), Stereoscopic Displays and Applications 2017}, author = {Deas, Lesley M. and Allison, Robert S. and Hartle, Brittney and Irving, Elizabeth L. and Glaholt, Mackenzie and Wilcox, Laurie M.}, booktitle = {{IS\&T} International Symposium on Electronic Imaging 2017, Stereoscopic Displays and Applications XXVIII}, date-added = {2017-07-26 18:30:36 +0000}, date-modified = {2017-11-27 01:29:19 +0000}, doi = {10.2352/ISSN.2470-1173.2017.5.SD&A-355}, journal = {Electronic Imaging}, keywords = {Stereopsis}, month = 01, number = {5}, pages = {41--47}, title = {Estimation of {Altitude} in {Stereoscopic}-3D {Versus} 2D {Real}-world {Scenes}}, url = {http://www.ingentaconnect.com/content/ist/ei/2017/00002017/00000005/art00005}, url-1 = {http://www.ingentaconnect.com/content/ist/ei/2017/00002017/00000005/art00005}, url-2 = {https://doi.org/10.2352/ISSN.2470-1173.2017.5.SD&A-355}, volume = {2017}, year = {2017}}
@inproceedings{allison_paper:_2017, abstract = {Recently the movie industry has been advocating the use of frame rates significantly higher than the traditional 24 frames per second. This higher frame rate theoretically improves the quality of motion portrayed in movies, and helps avoid motion blur, judder and other undesirable artifacts. Previously we reported that young adult audiences showed a clear preference for higher frame rates, particularly when contrasting 24 fps with 48 or 60 fps. We found little impact of shutter angle (frame exposure time) on viewers' choices. In the current study we replicated this experiment with an audience composed of imaging professionals who work in the film and display industry and assess image quality as an aspect of their everyday occupation. These viewers were also on average older and thus could be expected to have attachments to the film ``look'' both through experience and training. We used stereoscopic 3D content, filmed and projected at multiple frame rates (24, 48 and 60 fps), with shutter angles ranging from $90^{\circ}$ to $358^{\circ}$, to evaluate viewer preferences. In paired-comparison experiments we assessed preferences along a set of five attributes (realism, motion smoothness, blur/clarity, quality of depth and overall preference). As with the young adults in the earlier study, the expert viewers showed a clear preference for higher frame rates, particularly when contrasting 24 fps with 48 or 60 fps. We found little impact of shutter angle on viewers' choices, with the exception of one clip at 48 fps where there was a preference for larger shutter angle. However, this preference was found for the most dynamic ``warrior'' clip in the experts but in the slower moving ``picnic'' clip for the na{\"\i}ve viewers. These data confirm the advantages afforded by high-frame rate capture and presentation in a cinema context in both na{\"\i}ve audiences and experienced film professionals. }, annote = {Stereoscopic Displays and Applications 2017}, author = {Allison, Robert S. and Wilcox, Laurie M. and Anthony, Roy C. and Helliker, John and Dunk, Bert}, booktitle = {{IS\&T} International Symposium on Electronic Imaging 2017, Stereoscopic Displays and Applications XXVIII (Reprinted from Journal of Imaging Science and Technology)}, date-added = {2017-07-26 18:30:36 +0000}, date-modified = {2019-02-03 09:38:13 -0500}, doi = {10.2352/ISSN.2470-1173.2017.5.SD&A-353}, journal = {Electronic Imaging (Reprinted from Journal of Imaging Science and Technology)}, keywords = {Stereopsis}, month = 01, number = {5}, pages = {20--28}, title = {Paper: {Expert} {Viewers}' {Preferences} for {Higher} {Frame} {Rate} 3D {Film}}, url = {http://www.ingentaconnect.com/content/ist/ei/2017/00002017/00000005/art00003}, url-1 = {http://www.ingentaconnect.com/content/ist/ei/2017/00002017/00000005/art00003}, url-2 = {https://doi.org/10.2352/ISSN.2470-1173.2017.5.SD&A-353}, volume = {2017}, year = {2017}}
@inproceedings{brunnstrom_industry_2017, author = {Brunnstr{\"o}m, K. and Allison, R. S. and Chandler, D. M. and Colett, H. and Corriveau, P. and Daly, S. and Goel, J. and Knopf, J. and Wilcox, L. M. and Yaacob, Y. and Yang, S.-N. and Zhang, Y.}, booktitle = {{IS\&T} International Symposium on Electronic Imaging 2017, Human Vision and Electronic Imaging 2017}, date-added = {2017-07-26 18:30:36 +0000}, date-modified = {2019-02-03 09:03:20 -0500}, doi = {10.2352/ISSN.2470-1173.2017.14.HVEI-131}, issn = {2470-1173}, journal = {Electronic Imaging}, keywords = {Image Quality}, language = {en}, month = 01, number = {14}, pages = {118--133}, title = {Industry and business perspectives on the distinctions between visually lossless and lossy video quality: {Mobile} and large format displays}, url = {http://www.ingentaconnect.com/content/10.2352/ISSN.2470-1173.2017.14.HVEI-131}, url-1 = {http://www.ingentaconnect.com/content/10.2352/ISSN.2470-1173.2017.14.HVEI-131}, url-2 = {https://doi.org/10.2352/ISSN.2470-1173.2017.14.HVEI-131}, urldate = {2017-07-26}, volume = {2017}, year = {2017}}
@inproceedings{Allison:ab, abstract = {VESA Display Stream Compression (DSC) is a light-weight codec designed for visually lossless compression over display links. Such high-performance algorithms must be evaluated subjectively to assess whether the codec meets visually lossless criteria. Here we present the first large-scale evaluation of DSC 1.2 according to ISO/IEC 29170-2.}, annote = {SID Meeting LA}, author = {Allison, R. S. and Wilcox, L. M. and Wang, W. and Hoffman, D. M. and Hou, Y. and Goel, J. and Deas, L. and Stolitzka, D.}, booktitle = {SID Digest of Technical Papers}, date-added = {2017-04-14 15:16:12 +0000}, date-modified = {2017-06-13 11:48:47 +0000}, doi = {10.1002/sdtp.11838}, keywords = {Image Quality}, pages = {1101--1104}, title = {Large Scale Subjective Evaluation of Display Stream Compression}, volume = {48 (1)}, year = {2017}, url-1 = {https://doi.org/10.1002/sdtp.11838}}
@inproceedings{Zhao:aa, abstract = {We present a method for estimating the Motion-to-Photon (End-to-End) latency of head mounted displays (HMDs). The specific HMD evaluated in our study was the Oculus Rift DK2, but the procedure is general. We mounted the HMD on a pendulum to introduce damped sinusoidal motion to the HMD during the pendulum swing. The latency was estimated by calculating the phase shift between the captured signals of the physical motion of the HMD and a motion-dependent gradient stimulus rendered on the display. We used the proposed method to estimate both rotational and translational Motion-to-Photon latencies of the Oculus Rift DK2.}, annote = {18-22 March 2017 Los Angeles}, author = {Zhao, J. and Allison, R. S. and Vinnikov, M. and Jennings, S.}, booktitle = {IEEE Virtual Reality 2017}, date-added = {2017-04-14 14:54:33 +0000}, date-modified = {2017-04-14 14:54:33 +0000}, doi = {10.1109/VR.2017.7892302}, keywords = {Augmented & Virtual Reality}, pages = {313-314}, title = {Estimating the Motion-to-Photon Latency in Head Mounted Displays}, year = {2017}, url-1 = {https://doi.org/10.1109/VR.2017.7892302}}
@techreport{Hartle:2017pb, abstract = {While there is a long history of research in the contribution of binocular vision and stereoscopic depth perception to flight-based tasks, there is no consensus on its operational relevance. Evidence of such operational relevance is required to determine whether stereoscopic vision should be a requirement for Canadian Air Forces (CAF) aircrew, or if and when waivers can safely be permitted. In the experiments reported herein we examined the contribution of binocular vision to a simulated low hover helicopter flight task in which observers were asked to judge the relative distance between a virtual helicopter skid and the ground plane. Four terrain types were used, and observers were asked to make relative depth judgements monocularly and binocularly. In the first study a group of na{\"\i}ve observers was tested, and in the second experiment we tested a group of experienced aircrew. Our results show that the presence of stereopsis improves the accuracy of relative altitude judgements for low altitudes (below 5 feet) that are typical of low hover flight operations. Under monocular viewing conditions depth judgements were significantly less accurate. This pattern of results was seen in both experiments, with na{\"\i}ve undergraduates and trained aircrew. However, we found that the depth estimates of aircrew were more accurate than those of na{\"\i}ve observers under monocular viewing conditions, a result that may reflect situation-specific training during operational maneuvers. From an operational perspective, these results highlight the potential importance of binocular vision in performing low-hover tasks, and the impact of training on the use of specific depth cues. }, author = {Hartle, Brittney and Allison, Robert S. and Irving, Elizabeth L. and Wilcox, Laurie M.}, date-added = {2019-03-08 16:43:56 -0500}, date-modified = {2019-03-08 16:51:24 -0500}, institution = {CIMVHR Contract Report}, keywords = {Stereopsis}, number = {PWGSC Contract Number: W7714-145967}, title = {The effect of training on the use of binocular depth cues in low hover depth estimation}, year = {2017}}
@article{Allison:zp, abstract = {Recently the movie industry has been advocating the use of frame rates significantly higher than the traditional 24 frames per second. This higher frame rate theoretically improves the quality of motion portrayed in movies, and helps avoid motion blur, judder and other undesirable artifacts. Previously we reported that young adult audiences showed a clear preference for higher frame rates, particularly when contrasting 24 fps with 48 or 60 fps. We found little impact of shutter angle (frame exposure time) on viewers' choices. In the current study we replicated this experiment with an audience composed of imaging professionals who work in the film and display industry and assess image quality as an aspect of their everyday occupation. These viewers were also on average older and thus could be expected to have attachments to the ``film look'' both through experience and training. We used stereoscopic 3D content, filmed and projected at multiple frame rates (24, 48 and 60 fps), with shutter angles ranging from 90\degree to 358\degree , to evaluate viewer preferences. In paired-comparison experiments we assessed preferences along a set of five attributes (realism, motion smoothness, blur/clarity, quality of depth and overall preference). As with the young adults in the earlier study, the expert viewers showed a clear preference for higher frame rates, particularly when contrasting 24 fps with 48 or 60 fps. We found little impact of shutter angle on viewers' choices, with the exception of one clip at 48 fps where there was a preference for larger shutter angle. However, this preference was found for the most dynamic ``warrior'' clip in the experts but in the slower moving ``picnic'' clip for the na{\"\i}ve viewers. These data confirm the advantages afforded by high-frame rate capture and presentation in a cinema context in both na{\"\i}ve audiences and experienced film professionals. }, author = {Allison, R. S. and Wilcox, L. M. and Anthony, R. C. and Helliker, J and Dunk, A.}, date-added = {2016-09-12 20:15:43 +0000}, date-modified = {2019-02-03 09:04:09 -0500}, doi = {10.2352/J.ImagingSci.Technol.2016.60.6.060402}, journal = {Journal of Imaging Science and Technology (Also presented at {IS\&T} Stereoscopic Displays and Applications)}, keywords = {Stereopsis}, number = {6}, pages = {60402.1-60402.9}, title = {Expert Viewers' Preferences for Higher Frame Rate 3D Film}, volume = {60}, year = {2016}, url-1 = {https://doi.org/10.2352/J.ImagingSci.Technol.2016.60.6.060402}}
@article{Allison:2016aa, author = {Allison, R. S. and Johnston, J. M. and Craig, G. and Jennings, S.}, date-added = {2016-08-16 23:24:18 +0000}, date-modified = {2018-11-25 14:23:17 -0500}, doi = {10.3390/s16081310}, journal = {Sensors}, keywords = {Misc.}, number = {8}, pages = {1310.1-1310.29}, title = {Airborne optical and thermal remote sensing for wildfire detection and monitoring}, volume = {16}, year = {2016}, url-1 = {https://doi.org/10.3390/s16081310}}
@article{Tsirlin:yq, abstract = {Both the upper and lower disparity limits for stereopsis vary with the size of the targets. Recently, Tsirlin, Wilcox and Allison (2012) suggested that perceived depth magnitude from stereopsis might also depend on the vertical extent of a stimulus. To test this hypothesis we compared apparent depth in small discs to depth in long bars with equivalent width and disparity. We used three estimation techniques: a virtual ruler, a touch-sensor (for haptic estimates) and a disparity probe. We found that depth estimates were significantly larger for the bar stimuli than for the disc stimuli for all methods of estimation and different configurations. In a second experiment, we measured perceived depth as a function of the height of the bar and the radius of the disc. Perceived depth increased with increasing bar height and disc radius suggesting that disparity is integrated along the vertical edges. We discuss size-disparity correlation and inter-neural excitatory connections as potential mechanisms that could account for these results. }, author = {Tsirlin, I and Wilcox, L. M. and Allison, R. S.}, date-added = {2016-04-09 17:29:11 +0000}, date-modified = {2016-08-28 17:55:53 +0000}, doi = {10.1016/j.visres.2016.04.006}, journal = {Vision Research}, keywords = {Stereopsis}, pages = {41-45}, title = {Size matters: Perceived depth magnitude varies with stimulus height}, volume = {123}, year = {2016}, url-1 = {https://doi.org/10.1016/j.visres.2016.04.006}}
@article{Vinnikov:zr, abstract = {While stereoscopic content can be compelling, it is not always comfortable for users to interact with on a regular basis. This is because the stereoscopic content on displays viewed at a short distance has been associated with different symptoms such as eye-strain, visual discomfort, and even nausea. Many of these symptoms have been attributed to cue conflict, for example between vergence and accommodation. To resolve those conflicts, volumetric and other displays have been proposed to improve the user's experience. However, these displays are expensive, unduly restrict viewing position, or provide poor image quality. As a result, commercial solutions are not readily available. We hypothesized that some of the discomfort and fatigue symptoms exhibited from viewing in stereoscopic displays may result from a mismatch between stereopsis and blur, rather than between sensed accommodation and vergence. To find factors that may support or disprove this claim, we built a real-time gaze-contingent system that simulates depth of field (DOF) that is associated with accommodation at the virtual depth of the point of regard (POR). Subsequently, a series of experiments evaluated the impact of DOF on people of different age groups (younger versus older adults). The difference between short duration discomfort and fatigue due to prolonged viewing was also examined. Results indicated that age may be a determining factor for a user's experience of DOF. There was also a major difference in a user's perception of viewing comfort during short-term exposure and prolonged viewing. Primarily, people did not find that the presence of DOF enhanced short-term viewing comfort, while DOF alleviated some symptoms of visual fatigue but not all.}, author = {Vinnikov, M. and Allison, R. S. and Fernandes, S.}, date-added = {2016-03-01 23:27:46 +0000}, date-modified = {2016-04-26 14:03:59 +0000}, doi = {10.1016/j.ijhcs.2016.03.001}, journal = {International Journal of Human-Computer Studies}, keywords = {Eye Movements & Tracking}, pages = {37-51}, title = {Impact of Depth of Field Simulation on Visual Fatigue: Who are Impacted? and How?}, url = {http://percept.eecs.yorku.ca/papers/vinnikov%20dof.pdf}, url-1 = {https://doi.org/10.1016/j.ijhcs.2016.03.001}, volume = {91}, year = {2016}}
@article{Palmisano:rz, author = {Palmisano, S. A. and Hill, H. and Allison, R. S.}, date-added = {2015-12-05 15:24:00 +0000}, date-modified = {2018-11-25 14:26:07 -0500}, doi = {10.1177/2041669515625793}, journal = {i-Perception}, keywords = {Stereopsis}, number = {1}, pages = {Article 2041669515625793, 1-24}, title = {The nature and timing of pseudoscopic experiences}, url = {http://ipe.sagepub.com/content/7/1/2041669515625793.full}, url-1 = {http://ipe.sagepub.com/content/7/1/2041669515625793.full}, url-2 = {https://doi.org/10.1177/2041669515625793}, volume = {7}, year = {2016}}
@article{Suryakumar:2020yq, author = {Suryakumar, R. and Allison, R.S.}, date-added = {2015-03-05 17:38:12 +0000}, date-modified = {2016-04-26 14:05:55 +0000}, doi = {10.1016/j.optom.2015.03.002}, journal = {Journal of Optometry}, keywords = {Eye Movements & Tracking, Stereopsis}, number = {1}, pages = {40-46}, title = {Accommodation and pupil responses to random-dot stereograms}, url = {http://percept.eecs.yorku.ca/papers/suryakumar%202015.pdf}, url-1 = {https://doi.org/10.1016/j.optom.2015.03.002}, volume = {9}, year = {2016}}
@incollection{Bunn:2016aa, abstract = {The traditionally subjective mobility, gait and balance test -- the Tinetti -- is now an objective computer measurement. The Tinetti is a standard test for determining the risk of falling. With the help of York University (Computer Science \& Engineering and Health/Kinesiology), and the support of the NSERC Engage program, the Mobility Assessment Tool, MAT, was developed as a non-invasive, reproducible, reliable test based on a modified Tinetti test. MAT uses the analysis of a three-minute video of a subject sitting, standing up, sitting back down, walking a few paces, and turning in a circle and on the spot. The MAT analysis software runs on an off-the-shelf laptop computer to analyze the video taken with a standard Microsoft Kinect dual channel camera. Built into the camera is the separation of the moving subject from the background. It also overlays a twenty-two point ``skeleton'' representing the movement of the skeleton-points of the subject. The analysis takes a few seconds and produces measurements of thirty-two different parameters of the subject's movement as depicted by the skeleton points. Twenty-two of these parameters are used to calculate the Tinetti score for the risk of falling (low, moderate, or high risk). The discussion will focus on the simplicity and ease of use of the MAT as a diagnostic and tracking tool. Applications of the MAT include: 1) tracking the rehabilitation milestones for concussion patients, 2) monitoring the rehabilitation of stroke patients, 3) tracking the stabilization or deterioration of Alzheimer's patients.}, annote = {Toronto Falls \& Mobility Network Meeting Nov. 21, 2016}, author = {Bunn, F. and Allison, R. S. and Sergio, L. and Gorbet, D. and Bunn, S. and Zhao, J.}, booktitle = {Falls \& Mobility Network Meeting 2016 - Research and Innovative Clinical Practices.}, date-added = {2016-12-04 21:58:08 +0000}, date-modified = {2016-12-04 22:12:07 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {Mobility Assessment Tool ({MAT}), Computer Vision for A Purely Objective Gait-Balance Test.}, year = {2016}}
@incollection{Sultana:2016fj, annote = {Frankfurt Germany July 11-15, 2016}, author = {Sultana, A. and Allison, R. S.}, booktitle = {IRTG Workshop, Frankfurt Germany July 11-15, 2016}, date-added = {2016-12-04 21:56:23 +0000}, date-modified = {2016-12-04 22:09:28 +0000}, keywords = {Stereopsis}, title = {Visual perception of transparent objects in real and virtual world}, year = {2016}}
@incollection{Sultana:2016aa, abstract = {Light plays an extremely important role in the perception of transparency, depth and shape of water. In computer graphics, an important question is: How does perception of transparent objects (including 3D shape reconstruction) depend on the fidelity of the rendering? In this paper, I present a theoretical method to recover the surface shape of water under daylight settings based on the human stereoscopic view. Flat transparent objects with arbitrary depth should pass all the incoming light and we should obtain a clear view of the background through them. However, when the surface is not flat, the image of the background or the underlying surface gets distorted because the chromatic light passing through different regions of the water surface experiences distortion and absorption that vary with wavelength. The reflection and refraction angles of light hitting the surface of a material depend on the direction of the light, its spectral composition, the medium and its surface shape. In this study, we evaluate and improve the cues available for perceiving the shape of refractive objects by exploring the relationship in a 3D view between (a) reflective highlights on the water surface, which depend largely on the lighting conditions, and refractive features (with known index of refraction) seen through the surface of the medium, (b) viewing and perceiving conditions (stereoscopic and/or non-stereoscopic) and (c) textures and shading that provide cues to distortions. Analysis to date predicts that humans should have information to identify and reconstruct the shape of an object in refractive stereo given that: i. the object is transparent and visible; ii. the scene redirects incoming light just once and the index of refraction is known; iii. the surface is both optically smooth and textured; iv. at least two viewpoints are available to obtain one 3D point on its light path at the viewing surface. Psychophysical experiments that we are undertaking will either confirm or falsify human perceptual capability to identify and reconstruct the shape of objects under the above conditions.}, annote = {31 May - 3 June 2016 at the University of Granada, Spain}, author = {Sultana, A. and Allison, R. S.}, booktitle = {12th International Conference on Light and Color in Nature}, date-added = {2016-12-04 21:56:23 +0000}, date-modified = {2016-12-04 21:57:13 +0000}, keywords = {Stereopsis}, title = {Shape perception of water in photo-realistic 3D images}, year = {2016}}
@incollection{Zhao:2016aa, abstract = {We describe a unique new wide-field immersive stereoscopic environment (WISE) that can render real-time, interactive and immersive stereoscopic simulations. This recently constructed virtual environment allows for presenting seamless, high-resolution, high-contrast, binocular photorealistic renderings of challenging environments over the entire visual field. The display can present complex terrain with naturalistic texture and is equipped with motion tracking as well as linear and (soon) rotary treadmills (or the subject can be seated). Based on these capabilities we can present variations in simulated walking surfaces, potential obstacles, or interception targets and determine how these influence locomotor behaviour. We will describe experiments assessing the utility of mechanical repositioning interfaces as a locomotion interface.}, annote = {Sunday May 29th, 2016 Toronto}, author = {Zhao, J. and Allison, R. S.}, booktitle = {CAN CAPnet-CPS Satellite Meeting- Action \& Perception: Cognition, Coding and Clinical Populations}, date-added = {2016-09-12 20:17:26 +0000}, date-modified = {2016-09-12 20:17:26 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {A Wide Field Immersive Display for the Study of Locomotor Behaviour}, year = {2016}}
@incollection{Fujii:2016aa, author = {Fujii, Y. and Allison, R. S. and Guterman, P. and Wilcox, L. M.}, booktitle = {Journal of Vision (VSS Abstract)}, date-added = {2016-09-09 01:10:16 +0000}, date-modified = {2016-09-09 01:10:16 +0000}, doi = {10.1167/16.12.184}, keywords = {Stereopsis}, pages = {184}, title = {The effect of frame rate and motion blur on vection}, volume = {16}, year = {2016}, url-1 = {https://doi.org/10.1167/16.12.184}}
@inproceedings{Laldin:aa, abstract = {Stereoscopic media produce the sensation of depth through differences between the images presented to the two eyes. These differences arise from binocular parallax which in turn is caused by the separation of the cameras used to capture the scene. Creators of stereoscopic media face the challenge of depicting compelling depth while restricting the amount of parallax to a comfortable range. To address this tradeoff, stereoscopic warping or depth adjustment algorithms are used in the post-production process to selectively increase or decrease the depth in specific regions. This process modifies the image's depth-to-parallax mapping to suit the desired parallax range. As the depth is adjusted using non-linear parallax re-mapping functions, the geometric stereoscopic space is distorted. In addition, the relative expansion or compression of stereoscopic space should theoretically affect the perceived acceleration of an object passing through that region. Here we evaluate this prediction and determine if stereoscopic warping affects viewers' perception of acceleration. Observers judged the perceived acceleration of an approaching object (a toy helicopter) moving in depth through a complex stereoscopic 3D scene. The helicopter flew at one of two altitudes, either ground level or camera level. For each altitude, stereoscopic animations were produced under three depth re-mapping conditions i) compressive, ii) expansive, and iii) zero (no re-mapping) for a total of six test conditions. We predicted that expansive depth re-mapping would produce a bias toward perceiving deceleration of the approaching helicopter, while compressive depth re-mapping would result in a bias toward seeing acceleration. However, there was no significant difference in the amount or direction of bias between the re-mapping conditions. We did find a significant effect of the helicopter altitude, such that there was little bias in acceleration judgments when the helicopter moved at ground level but a significant bias towards reporting acceleration when the helicopter moved at camera level. This result is consistent with the proposal that observers can make use of additional monocular (2D) cues in the ground level condition to improve their acceleration estimates. The lack of an effect of depth re-mapping suggests that viewers have considerable tolerance to depth distortions resulting from stereoscopic post-processing. These results have important implications for effective post-production and quality assurance for stereoscopic 3D content creation. }, annote = {Li\`{e}ge, 13-14 Dec. 2016}, author = {Laldin, S. and Wilcox, L. M. and Allison, R. S.}, booktitle = {2016 International Conference on 3D Imaging (IC3D)}, date-added = {2016-12-04 21:59:19 +0000}, date-modified = {2017-02-04 15:59:47 +0000}, doi = {10.1109/IC3D.2016.7823446}, keywords = {Stereopsis}, organization = {IEEE}, pages = {1-8}, title = {The Effects of Depth Warping on Perceived Acceleration in Stereoscopic Animation}, year = {2016}, url-1 = {https://doi.org/10.1109/IC3D.2016.7823446}}
@inproceedings{Walker:fr, abstract = {High-fidelity general-purpose robotic simulators are a special class of simulator designed to simulate all the components of a real-world robotics system, including autonomous air vehicles and planetary exploration rovers, so that a real-world system can be tested and verified before/during deployment on the real-world hardware. General-purpose robotic simulators can simulate sensors, actuators, obstacles, terrains, environments, physics, lighting, fluids, and air particles, while also providing a means to verify the system's autonomous algorithms by using the simulated vehicle in place of the real-world one. General-purpose robotic simulators are typically coupled with an abstract robotic control interface so that autonomous systems evaluated on the simulated vehicles can be deployed, unchanged, on the corresponding real-world vehicles and vice versa. However, the problem with the current technology and research is that neither the robotic simulators nor the robotic control interfaces support Hard Real-Time capabilities, so they cannot guarantee that Hard Real-Time constraints will be met. The lack of Hard Real-Time support has major implications for both the utility and the validity of the simulation results and the functioning of the real-world autonomous vehicle. As a solution, this paper will present Hard-RTSim, a novel hard real-time simulation framework that will: 1) Bring Hard Real-Time support to general-purpose robotic simulators; and 2) Bring Hard Real-Time support to abstract robotic control interfaces. Hard-RTSim guarantees that simulated events in the environment or modeled vehicle are produced and handled with finite (bounded) accuracy and precision. Furthermore it improves these temporal responses to ensure these bounds are representative of temporal requirements for a wide range of scenarios. The Hard-RTSim framework ensures that the simulator and the hard real-time processes will actually get to use the CPU when they request/need it, no matter how many other processes are loaded on the CPU. The experimental results of using the Hard-RTSim framework compared to not using it yield a huge improvement in responsiveness and reliability. There is an improvement of 35\% when the CPU is minimally loaded and then as the CPU load is increased the improvement increases as well, all the way up to a 98\% improvement when the CPU is loaded at its maximum. These substantial improvements in precision and reliability will help to further the state of space exploration, aerospace technology, and produce better and more reliable autonomous aerial vehicles and planetary exploration rovers. }, annote = {4-8 January 2016, San Diego, California. AIAA Science and Technology Forum and Exposition (SciTech 2016)}, author = {Walker, S. M. and Shan, J. and Allison, R. S.}, booktitle = {AIAA Modeling and Simulation Technologies Conference, AIAA SciTech}, date-added = {2015-12-05 15:21:38 +0000}, date-modified = {2016-08-28 18:02:55 +0000}, doi = {10.2514/6.2016-16}, keywords = {Misc.}, number = {AIAA 2016-1667}, pages = {1667.1-1667.12}, title = {Hard Real-Time General-Purpose Robotic Simulations of Autonomous Air Vehicles}, url = {http://percept.eecs.yorku.ca/papers/FINAL%20SUBMISSION%20ShawnWalkerHardRTSim.pdf}, url-1 = {https://doi.org/10.2514/6.2016-16}, year = {2016}}
@article{Wilcox:2015tap, abstract = {High frame rate movie-making refers to the capture and projection of movies at frame rates several times higher than the traditional 24 frames per second. This higher frame rate theoretically improves the quality of motion portrayed in movies, and helps avoid motion blur, judder and other undesirable artefacts. However, there is considerable debate in the cinema industry regarding the acceptance of HFR content given anecdotal reports of hyper-realistic imagery that reveals too much set and costume detail. Despite the potential theoretical advantages, there has been little empirical investigation of the impact of high-frame rate techniques on the viewer experience. In this study we use stereoscopic 3D content, filmed and projected at multiple frame rates (24, 48 and 60 fps), with shutter angles ranging from $90^{\circ}$ to $358^{\circ}$, to evaluate viewer preferences. In a paired-comparison paradigm we assessed preferences along a set of five attributes (realism, motion smoothness, blur/clarity, quality of depth and overall preference). The resulting data show a clear preference for higher frame rates, particularly when contrasting 24 fps with 48 or 60 fps. We found little impact of shutter angle on viewers' choices, with the exception of one measure (motion smoothness) for one clip type. These data are the first empirical evidence of the advantages afforded by high frame rate capture and presentation in a cinema context. }, annote = {ACM SIGGRAPH Symposium on Applied Perception September 13-14, 2015 - Tuebingen, Germany At the Max Planck Institute for Biological Cybernetics}, author = {Wilcox, L.M. and Allison, R. S. and Helliker, J. and Dunk, A. and Anthony, R.C.}, date-added = {2015-06-28 11:16:16 +0000}, date-modified = {2016-01-03 03:24:53 +0000}, doi = {10.1145/2810039}, journal = {{ACM} Transactions on Applied Perception ({TAP})}, keywords = {Stereopsis}, number = {14}, pages = {Article 15}, title = {Evidence that viewers prefer higher frame rate film}, url = {http://dl.acm.org/citation.cfm?id=2821016.2810039}, url-1 = {http://dl.acm.org/citation.cfm?id=2821016.2810039}, url-2 = {https://doi.org/10.1145/2810039}, volume = {12}, year = {2015}}
@article{Allison:ty, author = {Allison, R. S. and Wilcox, L. M.}, date-added = {2015-05-01 18:47:53 +0000}, date-modified = {2016-01-03 03:23:35 +0000}, doi = {10.1145/2770875}, journal = {{ACM} Transactions on Applied Perception}, keywords = {Stereopsis}, number = {3}, pages = {Article 10, 1-20}, title = {Perceptual tolerance to stereoscopic 3D image distortion}, url-1 = {https://doi.org/10.1145/2770875}, volume = {12}, year = {2015}}
@article{Palmisano:2015db, author = {Palmisano, S. A. and Allison, R. S. and Schira, M. M. and Barry, R. J.}, date-added = {2015-02-07 05:10:09 +0000}, date-modified = {2018-11-25 14:31:04 -0500}, doi = {10.3389/fpsyg.2015.00193}, journal = {Frontiers in Psychology (Perception Science)}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {Article 193, 1-15}, title = {Future Challenges for Vection Research: Definitions, Functional Significance, Measures and Neural Bases}, url-1 = {https://doi.org/10.3389/fpsyg.2015.00193}, volume = {6}, year = {2015}}
@incollection{Guterman:nr, abstract = {Changing head tilt relative to gravity changes the dynamic sensitivity of the otoliths to linear accelerations (gravitational and inertial). We explored whether visually induced self-motion (vection) is influenced by varying head tilt and optic flow direction with respect to gravity. We previously found that vection was enhanced when upright observers viewed vertical optic flow (i.e., simulating self-motion along the spinal axis) compared to horizontal flow. We hypothesized that if this benefit was due to aligning the visual motion signal with gravity, then inter-aural lamellar flow while lying on the side would provide a similar vection advantage. Observers stood and lay supine, prone, left and right side down, while viewing a translating random dot pattern simulating self-motion along the spinal or inter-aural axis. Vection magnitude estimates, onset, and duration were recorded. The results showed that aligning the direction of visual motion and gravity enhanced vection in side-lying observers, but when gravity was irrelevant---as in the supine and prone postures---spinal axis motion enhanced vection. However, perceived scene rigidity varied with head orientation (e.g., dots were seen as floating bubbles), so the issue of scene rigidity was examined by comparing vection in two environments: a rigid pipe structure which looked like a complex arrangement of plumbing pipes, and a field of dots. The results of varying head orientation, motion direction, and perceived scene rigidity will be discussed and may provide insight into whether self-motion perception is determined by a weighted summation of visual and vestibular signals.}, author = {Guterman, P. and Allison, R. S.}, booktitle = {Centre for Vision Research International Conference on Perceptual Organization}, date-added = {2015-06-23 11:38:16 +0000}, date-modified = {2015-06-23 11:38:16 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {June 23-26, 2015}, pages = {65}, publisher = {York University, Toronto}, title = {Effects of head orientation and scene rigidity on vection}, year = {2015}}
@incollection{Allison:rt, abstract = {Civilian operations are an important and growing application of night vision goggles (NVGs). Such devices extend human sensory capabilities but also introduce perceptual artefacts. In a series of laboratory experiments and helicopter-based flight trials we analyzed subject performance on model tasks based on typical civilian aviation applications. In the context of security and search operations the tasks included directed search over open and forested terrain, detection and identification of a temporary landing zone and search/tracking of a moving vehicle marked with a covert IR marker. Two other sets of flight trials explored the potential of night-vision aids in aerial wildfire detection; one was a controlled experiment and the other part of operational aerial detection patrols. The results of these studies confirm that NVGs can provide significant operational value but also illustrate the limitations of the technology and the ability of human operators to compensate for perceptual distortions.}, annote = {Carleton University will host the 25th Annual Meeting of the Canadian Society for Brain, Behaviour and Cognitive Science (CSBBCS) from June 5-7, 2015.}, author = {Allison, R. S. and Jennings, S. and Craig, G.}, booktitle = {25th Annual Meeting of the Canadian Society for Brain, Behaviour and Cognitive Science (CSBBCS), Canadian Journal of Experimental Psychology}, date-added = {2015-06-14 15:20:51 +0000}, date-modified = {2016-01-03 03:08:06 +0000}, doi = {10.1037/cep0000076}, journal = {Canadian Journal of Experimental Psychology}, keywords = {Night Vision}, number = {4}, pages = {348}, title = {Visual Perception and Performance during NVG-aided Civilian Helicopter Flight}, url-1 = {https://doi.org/10.1037/cep0000076}, volume = {69}, year = {2015}}
@incollection{Kirollos:kx, abstract = {Vection is an illusion of visually-induced self-motion in a stationary observer. This functional magnetic resonance imaging (fMRI) study measured psychophysical and blood oxygenation level-dependent (BOLD) responses to two types of visual stimuli: coherent optic flow stimuli and scrambled versions which preserved local, but disrupted global, motion information. The coherent optic flow stimuli produced robust percepts of vection while the scrambled stimuli produced little or no vection. The cingulate sulcus visual area (CSv) showed the clearest selective activation for coherent optic flow compared to incoherent (scrambled) flow, suggesting that CSv is heavily involved in self-motion processing.}, author = {Kirollos, R. and Allison, R. S. and Palmisano, S. A.}, booktitle = {25th Annual Meeting of the Canadian Society for Brain, Behaviour and Cognitive Science (CSBBCS), Canadian Journal of Experimental Psychology}, date-added = {2015-06-14 15:20:51 +0000}, date-modified = {2016-01-03 03:08:35 +0000}, doi = {10.1037/cep0000076}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {4}, pages = {369}, title = {The neural correlates of vection - an fMRI study}, url-1 = {https://doi.org/10.1037/cep0000076}, volume = {69}, year = {2015}}
@incollection{Cutone:uq, abstract = {Westheimer and McKee (1978, Journal of the Optical Society of America, 68(4), 450-455) reported that stereoacuity is unaffected by the motion of vertical line targets at speeds up to 2 deg/s. Subsequent studies found that thresholds rise exponentially at higher velocities (Ramamurthy, Patel & Bedell, 2005, Vision Research, 45(6), 789-799). This decrease in sensitivity has been attributed to retinal motion smearing; however, these experiments have not taken into account the additional effects of display persistence. Here we reassess the effects of lateral velocity on stereoacuity in the absence of display persistence, using physically moving stimuli. Luminous vertical line targets were mounted on computer-controlled motion stages. This purpose-built system permitted precise control of target position and movement in three dimensions. In a 1IFC paradigm with 120 ms viewing duration, observers fixated a stationary point and discriminated the relative depth of the two moving lines. The velocity of the line pair ranged from 0 (stationary) to 16 deg/s; each speed was tested in a separate block of trials. Our results confirm the resilience of stereoacuity to lateral retinal motion at velocities less than 2 deg/s. At higher speeds, thresholds increased marginally with speed for all observers. The rate of increase was 0.6 arc seconds per deg/s, approximately 10 times smaller than reported by Ramamurthy et al. (2005). It is clear that stereoacuity is more robust to lateral motion than previously believed; we argue that the threshold elevation reported previously is due to display persistence.}, author = {Cutone, M. and Allison, R. S. and Wilcox, L. M.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2015-06-14 15:20:51 +0000}, date-modified = {2015-09-02 06:49:38 +0000}, doi = {10.1167/15.12.380}, keywords = {Stereopsis}, number = {12}, pages = {380}, title = {Stereoacuity for physically moving targets is unaffected by retinal motion}, url-1 = {https://doi.org/10.1167/15.12.380}, volume = {15}, year = {2015}}
@incollection{Kirollos:vn, author = {Kirollos, R. and Allison, R. S. and Palmisano, S. A.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2015-06-14 15:20:51 +0000}, date-modified = {2015-09-02 06:51:02 +0000}, doi = {10.1167/15.12.1007}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {12}, pages = {1007}, title = {The neural correlates of vection - an fMRI study}, url-1 = {https://doi.org/10.1167/15.12.1007}, volume = {15}, year = {2015}}
@incollection{Vinnikov:bh, abstract = {Heading perception depends on the ability of different regions of the visual field to extract accurate information about the direction of the visual flow. Because it extracts the most accurate information, the central visual field plays a major role in heading estimation. With experience people learn to utilize other regions, especially if there is central field loss/impairment. Nevertheless, it is not clear what happens when information in central vision becomes altered or cannot be picked up. In the present study, we examined the effects of gaze-contingent alteration of regions of the visual field on heading. On each trial, one of six directions of self-motion was simulated (headings $\pm 7.5^{\circ}$, $\pm 5.0^{\circ}$ and $\pm 2.5^{\circ}$ from the centre of the screen). The simulated defects were analogous to two typical visual field disturbances resulting from macular degeneration, either metamorphopsia or scotomas. Specifically, with a forced-choice procedure we compared performance with no visual defects to that with five different simulated defects (either $5^{\circ}$ or $10^{\circ}$ horizontal perturbations, $5^{\circ}$ or $10^{\circ}$ Gaussian perturbations, or a $10^{\circ}$ scotoma). We also looked at three gaze conditions: free viewing, directional viewing and tracking features in the scene. Heading performance was not significantly different in the two environments examined (translation over a plane covered with blue particles or through a forest). Performance declined in the presence of simulated visual defects, as well as when observers were instructed to visually track specific scene features. Performance was most accurate for all heading directions during the free viewing conditions. We conclude that when people are free to direct their gaze in the scene they are able to minimize the impact of simulated central visual field loss/distortion.}, author = {Vinnikov, M. and Allison, R. S. and Palmisano, S. A.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2015-06-14 15:20:51 +0000}, date-modified = {2019-02-03 09:28:15 -0500}, doi = {10.1167/15.12.1015}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {12}, pages = {1015}, title = {Heading Perception with Simulated Visual Defects}, url-1 = {https://doi.org/10.1167/15.12.1015}, volume = {15}, year = {2015}}
@incollection{Guterman:ys, abstract = {Changing head orientation with respect to gravity changes the dynamic sensitivity of the otoliths to linear accelerations (gravitational and inertial). We explored whether varying head orientation and optic flow direction relative to gravity affects the perception of visually induced self-motion (vection). We previously found that vection was enhanced when upright observers viewed lamellar flow that moved vertically relative to the head (i.e., simulating self motion along the spinal axis) compared to horizontal flow. We hypothesized that if this benefit was due to aligning the simulated self-motion with gravity, then inter-aural (as opposed to spinal) axis motion while lying on the side would provide a similar vection advantage. Alternatively, motion along the spinal axis could enhance vection regardless of head orientation relative to gravity. Observers stood and lay supine, prone, left and right side down, while viewing a translating random dot pattern that simulated observer motion along the spinal or inter-aural axis. Vection magnitude estimates, onset, and duration were recorded. The results showed that aligning the optic flow direction with gravity enhanced vection in side-lying observers, but when overlapping these signals was not possible---as in the supine and prone postures---spinal axis motion enhanced vection. However, perceived scene rigidity varied with head orientation (e.g., dots were seen as floating bubbles in some conditions). To examine the issue of scene rigidity, we compared vection during simulated motion with respect to two environments: a rigid pipe structure, which looked like a complex arrangement of plumbing pipes, and a field of dots. The results of varying head and motion direction and perceived scene rigidity will be discussed, and may provide insight into whether self-motion perception is determined by a weighted summation of visual and vestibular inputs.}, author = {Guterman, P. and Allison, R. S.}, booktitle = {Journal of Vision (VSS Abstracts)}, date-added = {2015-06-14 15:20:51 +0000}, date-modified = {2015-09-02 06:49:43 +0000}, doi = {10.1167/15.12.862}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {12}, pages = {862}, title = {The influence of scene rigidity and head tilt on vection}, url-1 = {https://doi.org/10.1167/15.12.862}, volume = {15}, year = {2015}}
@incollection{Allison:fk2, abstract = {The ability to make sense of cluttered auditory environments is convincingly demonstrated in the so-called cocktail party effect. This ability of a listener to separate a speech signal of interest from competing speech signals and background noise is greatly improved when they have normal binaural cues to the spatial location of the speaker. However, in most media applications, including virtual reality and telepresence, the audio information is impoverished. We hypothesized that a listener's spatial auditory attention could be simulated based on visual attention. Since interlocutors typically look at their conversational partner, we used gaze as an indicator of current conversational interest. We built a gaze-contingent display that modified the volume of the speakers' voices contingent on the current region of overt attention. We found that a rapid increase in amplification of the attended speaker combined with attenuation but not elimination of competing sounds (partial rather than absolute selection) was most natural and improved source recognition. In conclusion, audio gaze-contingent displays offer potential for simulating rich, natural social and other interactions in virtual environments.}, annote = {The European Conference on Visual Perception (ECVP) will take place between August 23rd and August 27th on the campus of the University of Liverpool.}, author = {Allison, R. S. and Vinnikov, M.}, booktitle = {European Conference on Visual Perception, {ECVP} 2015}, date-added = {2015-06-14 15:20:51 +0000}, date-modified = {2015-11-17 12:52:56 +0000}, doi = {10.1177/0301006615598674}, keywords = {Eye Movements & Tracking}, pages = {81}, publisher = {Perception}, title = {Simulating spatial auditory attention in a gaze contingent display: The virtual cocktail party}, url-1 = {https://doi.org/10.1177/0301006615598674}, volume = {44(S1)}, year = {2015}}
@incollection{Fujii:2015fk, abstract = {Digital technologies allow movies to be exhibited at frame rates much higher than the traditional 24 fps. High frame rate (HFR) movies are being released in theaters and it is assumed that HFR will reduce artifacts and enhance quality of motion in 2-D and 3-D media. The goal of this project is to assess this assumption empirically by basic measurement of motion perception. In a series of experiments we measured lateral (2-D) and in depth (3-D) global motion coherence thresholds using random-dot patterns in a mirror stereoscope and a 3D projection system. The refresh rate of the display was fixed at 96 Hz, and we manipulated the flash protocol to create 96 (single flash), 48 (double flash) and 24 (quadruple flash) frames per second. Simulated linear velocity of the elements through space was equated in the 2-D and 3-D conditions. Conditions were randomly interleaved using the method of constant stimuli and a two-interval forced-choice procedure to measure the proportion of coherent elements required to reliably detect global motion. Results showed no consistent effect of flash protocol on coherence thresholds in either the 2-D or the 3-D conditions in both the stereoscope and 3D projection system. Our results show that while frame rate influences local 2-D motion processing, it has no apparent impact on global lateral, or in depth, motion coherence perception. This indicates that improvements in the quality of the motion signal do not always enhance perception.}, annote = {June 23-26, 2015, York University}, author = {Fujii, Y. and Allison, Robert S. and Shen, L. and Wilcox, Laurie M.}, booktitle = {Centre for Vision Research International Conference on Perceptual Organization}, date-added = {2015-06-14 15:20:51 +0000}, date-modified = {2015-06-23 11:37:53 +0000}, keywords = {Stereopsis}, language = {en}, month = {June 23-26, 2015}, pages = {55}, publisher = {York University, Toronto}, title = {The effects of high frame rate on perception of 2-D and 3-D global coherent motion}, year = {2015}}
@inproceedings{Wilcox:2015ty, abstract = {High frame rate movie-making refers to the capture and projection of movies at frame rates several times higher than the traditional 24 frames per second. This higher frame rate theoretically improves the quality of motion portrayed in movies, and helps avoid motion blur, judder and other undesirable artefacts. However, there is considerable debate in the cinema industry regarding the acceptance of HFR content given anecdotal reports of hyper-realistic imagery that reveals too much set and costume detail. Despite the potential theoretical advantages, there has been little empirical investigation of the impact of high frame rate techniques on the viewer experience. In this study we use stereoscopic 3D content, filmed and projected at multiple frame rates (24, 48 and 60 fps), with shutter angles ranging from $90^{\circ}$ to $358^{\circ}$, to evaluate viewer preferences. In a paired-comparison paradigm we assessed preferences along a set of five attributes (realism, motion smoothness, blur/clarity, quality of depth and overall preference). The resulting data show a clear preference for higher frame rates, particularly when contrasting 24 fps with 48 or 60 fps. We found little impact of shutter angle on viewers' choices, with the exception of one measure (motion smoothness) for one clip type. These data are the first empirical evidence of the advantages afforded by high frame rate capture and presentation in a cinema context.}, annote = {ACM SIGGRAPH Symposium on Applied Perception, September 13-14, 2015, Tuebingen, Germany, at the Max Planck Institute for Biological Cybernetics}, author = {Wilcox, L.M. and Allison, R. S. and Helliker, J. and Dunk, A. and Anthony, R.C.}, booktitle = {ACM Symposium on Applied Perception (paper was published in a special issue of ACM TAP)}, date-added = {2015-06-28 11:16:16 +0000}, date-modified = {2016-01-03 03:25:02 +0000}, keywords = {Stereopsis}, month = {September 13-14, 2015}, title = {Evidence that viewers prefer higher frame rate film}, year = {2015}}
@inproceedings{Marianovski:2015yu, abstract = {There is growing interest in capturing and projecting movies at higher frame rates than the traditional 24 frames per second. Yet there has been little scientific assessment of the impact of higher frame rates (HFR) on the perceived quality of cinema content. Here we investigated the effect of frame rate, and associated variables (shutter angle and camera motion) on viewers' ability to discriminate letters in S3D movie clips captured by a professional film crew. The footage was filmed and projected at varying combinations of frame rate, camera speed and shutter angle. Our results showed that, overall, legibility improved with increased frame rate and reduced camera velocity. However, contrary to expectations, there was little effect of shutter angle on legibility. We also show that specific combinations of camera parameters can lead to dramatic reductions in legibility for localized regions in a scene.}, annote = {ACM SIGGRAPH Symposium on Applied Perception, September 13-14, 2015, Tuebingen, Germany, at the Max Planck Institute for Biological Cybernetics}, author = {Marianovski, M. and Wilcox, L.M. and Allison, R. S.}, booktitle = {Proceedings of the {ACM SIGGRAPH} Symposium on Applied Perception}, date-added = {2015-06-28 11:16:16 +0000}, date-modified = {2016-01-03 03:20:59 +0000}, doi = {10.1145/2804408.2804411}, keywords = {Stereopsis}, month = {September 13-14, 2015}, pages = {67-73}, title = {Evaluation of the Impact of High Frame Rates on Legibility in {S3D} Film}, url = {http://dl.acm.org/citation.cfm?id=2804408.2804411}, url-1 = {https://doi.org/10.1145/2804408.2804411}, volume = {SAP '15}, year = {2015}}
@inproceedings{Zhao:ve, abstract = {Patients with concussions, strokes and neuromuscular diseases such as Parkinson's disease often have difficulties in keeping balance and suffer from abnormal gaits. Gait assessment conducted by a physician or therapist in clinics is standard clinical practice for assessing such injuries. However, this approach is subjective, leading to potential problems of unrepeatability, poor sensitivity and unreliability. To conduct the assessment in an objective way, a computer-based gait assessment system is designed and presented in this paper. The system performs assessments of dynamic balance and gait by analyzing the skeleton frames of a subject captured by the Microsoft Kinect RGB-D sensor. Results show that the proposed system effectively scores subjects.}, annote = {37th Annual International Conference of the IEEE Engineering in Medicine and Biology Society, MiCo - Milano Conference Center, Milan, Italy, August 25-29 2015}, author = {Zhao, J. and Bunn, F. E. and Perron, J. M. and Shen, E. and Allison, R. S.}, booktitle = {37th Annual {IEEE} Engineering in Medicine and Biology Conference}, date-added = {2015-06-14 15:26:59 +0000}, date-modified = {2015-11-17 12:37:57 +0000}, doi = {10.1109/EMBC.2015.7319925}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {August 25-29 2015}, pages = {6679-6683}, title = {Gait Assessment using the Kinect {RGB-D} Sensor}, url-1 = {https://doi.org/10.1109/EMBC.2015.7319925}, year = {2015}}
@inproceedings{Hartle:2015rr, abstract = {While heuristics have evolved over decades for the capture and display of conventional 2D film, it is not clear that these always apply well to stereoscopic 3D (S3D) film. Recently there has been considerable research on viewer comfort in 3D media, but little attention has been paid to audience preferences for filming parameters in S3D. Here we evaluate observers' preferences for moving S3D film content in a theatre setting. Specifically, we examine preferences for combinations of camera motion (speed and direction) and stereoscopic depth (interaxial separation, IA). The amount of IA had no impact on clip preferences regardless of the direction or speed of camera movement. However, preferences were influenced by camera speed, but only in the in-depth condition, where observers preferred faster motion. This initially seems contrary to previous research, which shows that slower speeds are more comfortable for viewing S3D content. Since most studies of visual comfort focus on visual fatigue, there may be different underlying influences. Given the apparent discrepancy between the visual comfort literature and the preference results reported here, it is clear that viewer response to S3D film is complex and that decisions made to enhance comfort may in some instances produce less appealing content.}, annote = {Oral presentation, 10 February 2015, IS\&T/SPIE Electronic Imaging 2015, held 8-12 February at the Hilton San Francisco, Union Square, San Francisco, California, United States. Paper number 9391-23.}, author = {Hartle, B. and Lugtigheid, Arthur J. and Kazimi, A. and Allison, R. S. and Wilcox, L. M.}, booktitle = {IS\&T/SPIE Electronic Imaging 2015, Stereoscopic Displays and Applications XXVI, Proc. SPIE}, date-added = {2015-01-26 19:18:13 +0000}, date-modified = {2016-01-03 03:23:54 +0000}, doi = {10.1117/12.2079330}, editor = {Nicolas S. Holliman and Andrew J. Woods and Gregg E. Favalora and Takashi Kawai}, keywords = {Stereopsis}, month = {Feb 8-12}, pages = {93910R, 1-10}, title = {Preference for motion and depth in 3D film}, url = {http://percept.eecs.yorku.ca/papers/Hartle%20-%20SDA%202015.pdf}, url-1 = {https://doi.org/10.1117/12.2079330}, volume = {9391}, year = {2015}}
@article{palmisano_evidence_2014, abstract = {Visual-vestibular conflicts have been traditionally used to explain both perceptions of self-motion and experiences of motion sickness. However, sensory conflict theories have been challenged by findings that adding simulated viewpoint jitter to inducing displays enhances (rather than reduces or destroys) visual illusions of self-motion experienced by stationary observers. One possible explanation of this jitter advantage for vection is that jittering optic flows are more ecological than smooth displays. Despite the intuitive appeal of this idea, it has proven difficult to test. Here we compared subjective experiences generated by jittering and smooth radial flows when observers were exposed to either visual-only or multisensory self-motion stimulations. The display jitter (if present) was generated in real-time by updating the virtual computer-graphics camera position to match the observer's tracked head motions when treadmill walking or walking in place, or was a playback of these head motions when standing still. As expected, the (more naturalistic) treadmill walking and the (less naturalistic) walking in place were found to generate very different physical head jitters. However, contrary to the ecological account of the phenomenon, playbacks of treadmill walking and walking in place display jitter both enhanced visually induced illusions of self-motion to a similar degree (compared to smooth displays).}, author = {Palmisano, S. A. and Allison, R. S. and Ash, April and Nakamura, Shinji and Apthorp, Deborah}, date-added = {2014-10-28 14:20:56 +0000}, date-modified = {2015-07-10 22:45:36 +0000}, doi = {10.3389/fpsyg.2014.01297}, journal = {Frontiers in Psychology (Perception Science)}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {Article 1297}, pages = {1-9}, title = {Evidence Against an Ecological Explanation of the Jitter Advantage for Vection}, url = {http://journal.frontiersin.org/Journal/10.3389/fpsyg.2014.01297/}, url-1 = {https://doi.org/10.3389/fpsyg.2014.01297}, urldate = {2014-10-28}, volume = {5}, year = {2014}}
@article{Allison:2014rm, abstract = {Compelling illusions of self motion, known as `vection', can be produced in a stationary observer by visual stimulation alone. The role of binocular vision and stereopsis in these illusions was explored in a series of three experiments. Previous research had provided evidence of stereoscopic enhancements for linear vection in depth (e.g. Palmisano, 1996; 2002). Here we examined the effects of binocular vision and stereopsis on linear vertical vection for the first time. Vertical vection was induced by the upward or downward translation of large stereoscopic surfaces. These surfaces were horizontally-oriented depth corrugations produced by disparity modulation of patterns of persistent or short lifetime dot elements. We found that binocular viewing of such surfaces significantly increased the magnitudes and decreased the onset delays of vertical vection. Experiments utilising short lifetime dot stereograms demonstrated that these particular binocular enhancements of vection were due to the motion of stereoscopically-defined features.}, author = {Allison, R. S. and Ash, A. and Palmisano, S. A.}, date-added = {2014-08-14 16:58:46 +0000}, date-modified = {2016-01-03 03:22:25 +0000}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {12}, pages = {Article 5, 1-23}, title = {Binocular contributions to linear vection}, url-1 = {https://doi.org/10.1167/14.12.5}, volume = {14}, year = {2014}}
@article{Sakano:fk, abstract = {There are at least two possible binocular cues to motion-in-depth, namely disparity change over time and interocular velocity differences. There has been significant controversy about their relative contributions to the perception of motion-in-depth. In the present study, we used the technique of selective adaptation to address this question. In Experiment 1, we found that adaptation to motion-in-depth depicted by temporally correlated random-dot stereograms, which contained coherent interocular velocity difference, produced a motion aftereffect in the depth direction irrespective of the adaptors' interocular correlation for any adaptation duration tested. This suggests that coherent changing disparity does not contribute to motion-in-depth adaptation. Because the aftereffect duration did not saturate in the tested range of adaptation duration, it is unlikely that the lack of the effect of changing disparity was due to a ceiling effect. In Experiment 2, we used a novel adaptor that contained a unidirectional coherent interocular velocity difference signal and a bidirectional changing disparity signal that should not induce a motion aftereffect in depth. Following the adaptation, a motion aftereffect in depth occurred in the opposite direction to the adaptor's motion-in-depth based on interocular velocity difference. Experiment 3 demonstrated that these results generalized to 12 untrained subjects. These experiments suggest that the contribution of interocular velocity difference to the perception of motion-in-depth is substantial, while that of changing disparity is very limited (if any), at least at the stages of direction-selective mechanisms subject to an aftereffect phenomenon.}, author = {Sakano, Y. and Allison, R. S.}, date-added = {2014-06-17 15:56:54 +0000}, date-modified = {2014-07-24 23:08:09 +0000}, doi = {10.1167/14.8.21}, journal = {Journal of Vision}, keywords = {Motion in depth, Stereopsis}, month = {06}, number = {8}, pages = {article 21, 1-14}, title = {Aftereffect of motion-in-depth based on binocular cues: effects of adaptation duration, interocular correlation and temporal correlation}, url-1 = {https://doi.org/10.1167/14.8.21}, volume = {14}, year = {2014}}
@article{Tsirlin:2014kq, abstract = {In binocular vision, occlusion of one object by another gives rise to monocular occlusions---regions visible only in one eye. Although binocular disparities cannot be computed for these regions, monocular occlusions can be precisely localized in depth and can induce the perception of illusory occluding surfaces. The phenomenon of depth perception from monocular occlusions, known as da Vinci stereopsis, is intriguing, but its mechanisms are not well understood. We first propose a theory of the mechanisms underlying da Vinci stereopsis that is based on the psychophysical and computational literature on monocular occlusions. It postulates, among other principles, that monocular areas are detected explicitly, and depth from occlusions is calculated based on constraints imposed by occlusion geometry. Next, we describe a biologically inspired computational model based on this theory that successfully reconstructs depth in a large range of stimuli and produces results similar to those described in the psychophysical literature. These results demonstrate that the proposed neural architecture could underpin da Vinci stereopsis and other stereoscopic percepts.}, author = {Tsirlin, I. and Wilcox, L. M. and Allison, R. S.}, date-added = {2014-05-01 20:44:17 +0000}, date-modified = {2018-11-25 14:34:37 -0500}, doi = {10.1167/14.7.5}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {7}, pages = {article 5, 1-26}, title = {A computational theory of da Vinci stereopsis}, url-1 = {https://doi.org/10.1167/14.7.5}, volume = {14}, year = {2014}}
@article{Tomkins:vn, abstract = {Night-time flight searches using night vision goggles have the potential to improve early aerial detection of forest fires, which could in turn improve suppression effectiveness and reduce costs. Two sets of flight trials explored this potential in an operational context. With a clear line of sight, fires could be seen from many kilometres away (on average 3584 m for controlled point sources and 6678 m for real fires). Observers needed to be nearer to identify a light as a potential source worthy of further investigation. The average discrimination distance, at which a source could be confidently determined to be a fire or other bright light source, was 1193 m (95\% CI: 944 to 1442 m). The hit rate was 68\% over the course of the controlled experiment, higher than expectations based on the use of small fire sources and novice observers. The hit rate showed improvement over time, likely because of observers becoming familiar with the task and terrain. Night vision goggles enable sensitive detection of small fires, including those that were very difficult to detect during daytime patrols. The results demonstrate that small fires can be detected and reliably discriminated at night using night vision goggles at distances comparable to those recorded for daytime aerial detection patrols.}, author = {Tomkins, L. and Benzeroual, T. and Milner, A. and Zacher, J. E. and Ballagh, M. and McAlpine, R. and Doig, T. and Jennings, S. and Craig, G. and Allison, R. S.}, date-added = {2014-03-07 12:53:11 +0000}, date-modified = {2014-09-26 00:24:22 +0000}, doi = {10.1071/WF13042}, journal = {International Journal of Wildland Fire}, keywords = {Night Vision}, month = {05}, number = {5}, pages = {678-685}, title = {Use of Night-Vision Devices for Aerial Forest Fire Detection}, url = {http://percept.eecs.yorku.ca/papers/Fire%20journal%20paper%20final.pdf}, url-1 = {https://doi.org/10.1071/WF13042}, volume = {23}, year = {2014}}
@article{Howard:uq, abstract = {Information about the motion in depth of an object along the midline of a stationary observer is provided by changes in image size (looming), changes in vergence produced by changes in binocular disparity of the images of the object, and changes in relative disparity between the moving object and a stationary object. Each of these cues was independently varied in the dichoptiscope, which is described in Howard, Fukuda, and Allison (2013). The stimuli were a small central dot and a textured surface moving to and fro in depth along the midline. Observers tracked the motion with the unseen hand. Image looming was normal or absent. The change in vergence was absent, normal, more than normal, or reversed relative to normal. Changing relative disparity between the moving stimulus and a stationary surface was present or absent. Changing vergence alone produced no motion in depth for the textured surface but it produced some motion of the dot. Looming alone produced strong motion in depth for the texture but not for the dot. When the direction of motion indicated by looming was opposite that indicated by changing relative disparity, observers could use either cue. The cues dissociated rather than combined.}, author = {Howard, I. P. and Fujii, Y. and Allison, R. S.}, date-added = {2013-12-13 01:25:16 +0000}, date-modified = {2016-01-03 03:21:39 +0000}, doi = {10.1167/14.2.14}, journal = {Journal of Vision}, keywords = {Stereopsis, Motion in depth}, number = {2}, pages = {Article 14, 1-16}, title = {Interactions between cues to visual motion in depth}, url = {http://jov.highwire.org/content/14/2/14.short}, url-1 = {https://doi.org/10.1167/14.2.14}, volume = {14}, year = {2014}}
@article{Lugtigheid:fk, abstract = {The brain receives disparate retinal input owing to the separation of the eyes, yet we usually perceive a single fused world. This is because of complex interactions between sensory and oculomotor processes that quickly act to reduce excessive retinal disparity. This implies a strong link between depth perception and fusion, but it is well established that stereoscopic depth percepts are also obtained from stimuli that produce double images. Surprisingly, the nature of depth percepts from such diplopic stimuli remains poorly understood. Specifically, despite long-standing debate it is unclear whether depth under diplopia is owing to the retinal disparity (directly), or whether the brain interprets signals from fusional vergence responses to large disparities (indirectly). Here, we addressed this question using stereoscopic afterimages, for which fusional vergence cannot provide retinal feedback about depth. We showed that observers could reliably recover depth sign and magnitude from diplopic afterimages. In addition, measuring vergence responses to large disparity stimuli revealed that the sign and magnitude of vergence responses are not systematically related to the target disparity, thus ruling out an indirect explanation of our results. Taken together, our research provides the first conclusive evidence that stereopsis is a direct process, even for diplopic targets.}, author = {A. J. Lugtigheid and L. M. Wilcox and R. S. Allison and I. P. Howard}, date-added = {2013-11-26 12:25:39 +0000}, date-modified = {2014-09-26 02:31:01 +0000}, doi = {10.1098/rspb.2013.2118}, journal = {Proceedings of the Royal Society B}, keywords = {Stereopsis, Vergence}, number = {1776}, pages = {20132118.1-20132118.7}, title = {Vergence eye movements are not required for stereoscopic depth perception}, url = {http://rspb.royalsocietypublishing.org/content/281/1776/20132118.short}, url-1 = {https://doi.org/10.1098/rspb.2013.2118}, volume = {281}, year = {2014}}
@article{Howard:kx, abstract = {Shape constancy is the ability to perceive that a shape remains the same when seen in different orientations. It has usually been measured by asking subjects to match a shape in the frontal plane with an inclined shape. But this method is subject to ambiguity. In Experiment 1 we used a canonical-shape method, which is not subject to ambiguity. Observers selected from a set of inclined trapezoids the one that most resembled a rectangle (the canonical shape). This task requires subjects to register the linear perspective of the image, and the distance and inclination of the stimulus. For inclinations of $30^{\circ}$ and $60^{\circ}$ and distances up to 1 m, subjects were able to distinguish between a rectangle and a trapezoid tapered $0.4^{\circ}$. As the distance of the stimulus increased to 3 m, linear perspective became increasingly perceived as taper. In Experiment 2 subjects matched the perceived inclination of an inclined rectangle, in which the only cue to inclination was disparity, to the perceived inclination of a rectangle with all depth cues present. As the distance of the stimulus increased, subjects increasingly underestimated the inclination of the rectangle. We show that this pattern of inclination underestimation explains the distance-dependent bias in taper judgments found in Experiment 1.}, author = {Howard, I. P. and Fujii, Y. and Allison, R. S. and Kirollos, R.}, date-added = {2013-11-11 19:31:02 +0000}, date-modified = {2014-09-26 00:36:20 +0000}, doi = {10.1016/j.visres.2013.10.021}, journal = {Vision Research}, keywords = {Stereopsis}, pages = {33-40}, title = {Shape constancy measured by a canonical-shape method}, url = {http://percept.eecs.yorku.ca/papers/shape%20constancy%20preprint.pdf}, url-1 = {https://doi.org/10.1016/j.visres.2013.10.021}, volume = {94}, year = {2014}}
@article{Stransky:uq, abstract = {Stereoscopic 3D media has recently increased in appreciation and availability. This popularity has led to concerns over the health effects of habitual viewing of stereoscopic 3D content, concerns that are largely hypothetical. Here we examine the effects of repeated, long-term exposure to stereoscopic 3D in the workplace on several measures of stereoscopic sensitivity (discrimination, depth matching, and fusion limits) along with reported negative symptoms associated with viewing stereoscopic 3D. We recruited a group of adult stereoscopic 3D industry experts and compared their performance with that of i) observers inexperienced with stereoscopic 3D, ii) researchers who study stereopsis, and iii) vision researchers with little or no experimental stereoscopic experience. Unexpectedly, we found very little difference between the four groups on all but the depth discrimination task, and the differences that did occur appear to reflect task-specific training or experience. Thus we found no positive or negative consequences of repeated and extended exposure to stereoscopic 3D in these populations.}, author = {Debi Stransky and L. M. Wilcox and Robert S. Allison}, date-added = {2013-09-16 22:56:38 +0000}, date-modified = {2014-09-26 00:40:47 +0000}, doi = {10.1145/2536810}, journal = {{ACM} Transactions on Applied Perception}, keywords = {Stereopsis}, number = {1}, pages = {Article 2, 1-14}, title = {Effects of long-term exposure on sensitivity and comfort with stereoscopic displays}, url = {http://percept.eecs.yorku.ca/papers/a2-stransky.pdf}, url-1 = {https://doi.org/10.1145/2536810}, volume = {11}, year = {2014}}
@inbook{allison_perceptual_2014, abstract = {Stereoscopic film has long held an allure as the ultimate in fidelity for cinema and, as such, been a goal for those seeking the most compelling illusion of reality. However, the fundamental and technical limitations of the medium introduce a number of artefacts and imperfections that affect viewer experience. The renaissance of stereoscopic three-dimensional (S3D) film requires that film-makers revisit assumptions and conventions about factors that influence the visual appreciation and impact of their medium. This paper will discuss a variety of these issues from a perceptual standpoint and their implications for depth perception, visual comfort and sense of scale. The impact of these perceptual artefacts on the suspension of disbelief and the creation of alternate realities is discussed, as is their deliberate use when artistic considerations demand breaks with realism.}, author = {Allison, R. S. and Wilcox, L. M. and Kazimi, Ali}, booktitle = {{3D} Cinema and Beyond (chapter is reprinted from an article in Public)}, date-added = {2013-10-06 17:09:39 +0000}, date-modified = {2016-01-03 02:31:07 +0000}, editor = {Adler, Dan and Marchessault, Janine and Obradovic, Sanja}, isbn = {9781783200399}, keywords = {Stereopsis}, pages = {149-160}, publisher = {University of Chicago {Press/Intellect}}, title = {Perceptual Artefacts, Suspension of Disbelief and Realism in Stereoscopic {3D} Film}, url = {https://percept.eecs.yorku.ca/papers/Public%20Journal.pdf}, url-1 = {http://www.press.uchicago.edu/ucp/books/book/distributed/Other/bo16816844.html}, year = {2014}}
@incollection{Palmisano:2014mz, abstract = {Contrary to long-held assumptions, perceived scene rigidity is not essential for visually induced illusions of self-motion (i.e. vection). Roll vection can be induced by rotating a large homogeneous textured display relative to the upright observer. Under these conditions, the continuous roll vection experienced is paradoxically accompanied by maximum perceived self-tilts of less than 20 degrees (e.g. Howard, Cheung & Landolt, 1989). By contrast, Ian Howard's fully furnished tumbling room apparatus can induce highly compelling illusions of 360 degree (i.e. head-over-heels) self-rotation. We have found that both real and illusory tumbling in his room are accompanied by dramatic illusory scene distortions (scenery near the observer's fixation location sometimes leads and at other times lags the more peripheral scenery). The fact that these scene distortion and self-motion illusions co-occur so successfully is both intriguing and a major challenge to existing theories of self-motion perception. Our research has eliminated explanations of these illusory scene shearing effects based on eye-movements, distance misperception, peripheral aliasing, differential motion sensitivity and adaptation. Intriguingly, we have consistently found that perceived head-over-heels tumbling (either real or illusory) is the essential prerequisite for the scene shearing illusion.}, annote = {APCV 2014, July 2014}, author = {Palmisano, S. A. and Allison, R. S. and Howard, I. P.}, booktitle = {Asia-Pacific Conference on Vision, i-Perception}, date-added = {2014-09-08 14:16:27 +0000}, date-modified = {2014-09-09 18:56:18 +0000}, journal = {i-Perception}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {4}, pages = {437}, title = {Illusory scene shearing during real and illusory self-rotations in roll}, url = {http://i-perception.perceptionweb.com/fulltext/i05/apcv14s.pdf}, volume = {5}, year = {2014}}
@incollection{Shen:2014rz, abstract = {Recently, high frame rate movie technology has received significant technical and artistic attention due to its potential to present higher-fidelity motion to cinemagoers. Speed discrimination is a well-studied psychophysical task used to quantify sensitivity to motion. We used speed discrimination as a measure of the effects of frame presentation protocol on motion perception. An interleaved staircase procedure was used with a 2-interval-forced-choice task to measure discrimination thresholds for 7 subjects. The independent variables were frame rate and motion speed for a high-contrast line target. Flash (refresh) rate was fixed at 96 Hz and different frame rates were produced by updating the frame every refresh (single flash, 96 fps), alternate refresh (double flash, 48 fps) or every fourth refresh (quadruple flash, 24 fps). Stimuli were presented binocularly on CRT displays in a Wheatstone stereoscope but the presentation protocols approximate standard film presentation protocols. Five velocities (4 deg/s, 8 deg/s, 16 deg/s, 32 deg/s and 64 deg/s) were tested in separate blocks of trials; within a block staircases for the three flash protocols were randomly interleaved. The results show that at speeds greater than 16 deg/s, discrimination thresholds decrease with increasing frame rate (or equivalently, increase with number of repeated frames for a given flash protocol). This improvement likely reflects sensitivity to motion artifacts at low frame rates, when frames are repeated multiple times. Thus this study confirms that observers are sensitive to the improved fidelity offered by higher frame rates over the range considered for high frame rate cinema (24--96 fps).}, annote = {Fall Vision Meeting, Oct 10-12, 2014, Philadelphia, PA}, author = {Shen, L. and Allison, Robert S. and Wilcox, Laurie M. and Fujii, Y.}, booktitle = {OSA Fall Vision 2014, Journal of Vision}, date-added = {2014-09-08 14:16:00 +0000}, date-modified = {2015-01-05 00:02:35 +0000}, doi = {10.1167/14.15.57}, journal = {Journal of Vision}, keywords = {Stereopsis}, language = {en}, number = {15}, pages = {article 57}, title = {Motion discrimination of high frame rate movie}, url-1 = {https://doi.org/10.1167/14.15.57}, volume = {14}, year = {2014}}
@incollection{Fujii:2014ty, abstract = {Recent advances in film capture and display technologies have made it possible to use frame rates much higher than the 24 fps convention. It is assumed that high frame rates (HFR) will enhance perception of motion in 2-D and 3-D media. The goal of this project is to assess this assumption empirically. In a series of experiments we measured lateral (2-D) and in depth (3-D) global motion coherence thresholds using random-dot patterns in a mirror stereoscope and a 3D projection system. The refresh rate of the display was fixed at 96 Hz, and we manipulated the flash protocol to create 96 fps (single flash), 48 fps (double flash) and 24 fps (quadruple flash). Simulated linear velocity of the elements through space was equated in the 2-D and 3-D conditions. Conditions were randomly interleaved using the method of constant stimuli and a two-interval forced-choice procedure to measure the proportion of coherent elements required to reliably detect global motion. Results from six observers showed no consistent effect of flash protocol on coherence thresholds in either the 2-D or the 3-D test conditions. Interestingly, the 3-D task was considerably harder for all observers and required longer viewing time. Even with the increased viewing time, thresholds were double those seen in the lateral motion condition, despite the fact that the velocity of element motion through space was the same in the two conditions. Our results show that while frame rate influences local 2-D motion processing, it has no apparent impact on lateral, or in depth, global motion perception.}, annote = {Fall Vision Meeting, Oct 10-12, 2014, Philadelphia, PA}, author = {Fujii, Y. and Allison, Robert S. and Shen, L. and Wilcox, Laurie M.}, booktitle = {OSA Fall Vision 2014, Journal of Vision}, date-added = {2014-09-08 14:16:00 +0000}, date-modified = {2015-01-05 00:01:32 +0000}, doi = {10.1167/14.15.55}, journal = {Journal of Vision}, keywords = {Stereopsis}, language = {en}, number = {15}, pages = {article 55}, title = {The effects of frame rate on 2-D and 3-D global motion processing}, url-1 = {https://doi.org/10.1167/14.15.55}, volume = {14}, year = {2014}}
@incollection{Allison:2014qd, abstract = {An intriguing aspect of picture perception is the viewer's tolerance to modest variation in viewing position, perspective, and display size. In stereoscopic media, additional parameters control the relative position and orientation of the cameras. The amount of estimated depth from disparity can be obtained trigonometrically; however, perceived depth in complex scenes differs from geometrical predictions. It is not clear to what extent these differences are due to cognitive as opposed to perceptual factors. We recorded stereoscopic movies of an indoor scene with a range of inter-axial (IA) camera separation between 3 and 95 mm and displayed them on a range of screen sizes (all subtending 36 deg). Participants reproduced the depth between pairs of objects in the scene using reaching (3.5'' screen) or blind walking (54'' and 22'' screens). The effect of IA and screen size (and thus distance) was much smaller than predicted suggesting that observers compensate for distortion in the portrayed scene. These results mirror those obtained previously with depth magnitude estimation (Benzeroual et al., ECVP 2011). We conclude that multiple realistic depth cues drive normalization of perceived depth from binocular disparity and that these processes are not specific to either `perception' or `action' oriented tasks.}, annote = {APCV 2014, July 2014}, author = {Allison, R. S. and Benzeroual, K. and Wilcox, L. M.}, booktitle = {Asia-Pacific Conference on Vision, i-Perception}, date-added = {2014-09-08 14:16:00 +0000}, date-modified = {2014-09-09 18:55:51 +0000}, journal = {i-Perception}, keywords = {Stereopsis}, number = {4}, pages = {377}, title = {Active task measurements of tolerance to stereoscopic 3D image distortion}, url = {http://i-perception.perceptionweb.com/fulltext/i05/apcv14a.pdf}, volume = {5}, year = {2014}}
@incollection{Tsirlin22082014, abstract = {Stereoscopic acuity is known to vary with the overall size and width of the target. Recently, Tsirlin et al. (2012) suggested that perceived depth magnitude from stereopsis might also depend on the vertical extent of the stimulus. To test this hypothesis we compared perceived depth using small discs versus long bars with equivalent width and disparity. We used three estimation techniques. The first two, a virtual ruler and a touch-sensor (for haptic estimates), required that observers make quantitative judgements of depth differences between objects. The third method was a conventional disparity probe. This last technique, while often used for depth estimation, is a measure of disparity matching rather than quantitative depth perception. We found that depth estimates collected using the virtual ruler and the touch-sensor were significantly larger for the bar stimuli than for the disc stimuli. The disparity probe method yielded the same disparity estimates for both types of stimulus, which was not surprising given that they had the same relative disparity. In a second experiment, we measured perceived depth, using the virtual ruler, as a function of the height of a thin bar. In agreement with the first experiment, we found that perceived depth increased with increasing bar height. The dependence of perceived depth on the height of the stimulus is likely the result of the integration of disparity along the vertical edges, which enhances the reliability of depth estimation. The observed reduction in the magnitude of depth estimates for less reliable disparity signals may reflect a reweighting of depth cues or the expression of a bias towards small disparities. Our results also underscore the often-overlooked difference between measurements of depth and disparity, as the effect of target height was obscured when the disparity probe was used. Meeting abstract presented at VSS 2014.}, annote = {Vision Sciences Society 2014, St. Petersburg}, author = {Tsirlin, Inna and Wilcox, Laurie and Allison, Robert}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-modified = {2015-01-05 00:03:19 +0000}, doi = {10.1167/14.10.977}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {10}, pages = {977}, title = {Size matters: Perceived depth magnitude varies with stimulus height}, url = {http://www.journalofvision.org/content/14/10/977.abstract}, url-1 = {https://doi.org/10.1167/14.10.977}, volume = {14}, year = {2014}}
@incollection{Allison:2014lq, abstract = {Ian Howard loved optomechanical or electromechanical solutions for experimental stimuli and apparatus, an approach that is rare nowadays. Sometimes the device was the stimulus itself, as with the tumbling room, and in others the electromechanical system moved conventional stimuli, as in the dichoptiscope. This approach often allowed for precision, realism and fidelity not possible with computer-generated displays although it required skill at mechanical design and construction. Having collaborated with Ian on building several devices, I will review Ian's approach to the design of such apparatus, in particular highlighting some notable devices used to study binocular vision. I will also discuss what we can learn from Ian's approach in the light of new rapid-prototyping and manufacturing technologies for producing precise and easily constructed mechanical devices.}, author = {Allison, R. S.}, booktitle = {37th European Conference on Visual Perception}, date-added = {2014-08-28 17:08:35 +0000}, date-modified = {2014-09-09 18:54:29 +0000}, journal = {Perception}, keywords = {Misc.}, pages = {3}, title = {Howard's Devices}, url = {http://www.perceptionweb.com/abstract.cgi?id=v1424030}, volume = {43 (Suppl.)}, year = {2014}}
@incollection{Allison:kx, abstract = {Ian Porteus Howard (1927-2013) had a remarkable academic career spanning over 60 years that started with his initial appointment at the University of Durham in 1952. He is probably best known for his outstanding books -- Human Spatial Orientation (1966) (with Brian Templeton), through Human Visual Orientation (1982), Binocular Vision and Stereopsis (1995), the 2 volumes of Seeing in Depth (2002) and finally the 3 volumes of Perceiving in Depth (2012). Ian was also a talented experimentalist and the creator and builder of many novel pieces of experimental equipment including his rotating sphere and rotating room. Over the six decades he worked on a wide variety of research topics together with many graduate students, post-docs and researchers from Canada, USA, UK, Japan and Australia.}, author = {Rogers, B. J. and Allison, R. S. and Palmisano, S. A.}, booktitle = {37th European Conference on Visual Perception}, date-added = {2014-08-28 17:08:35 +0000}, date-modified = {2014-09-09 18:55:24 +0000}, journal = {Perception}, keywords = {Misc.}, pages = {2}, title = {A celebration of the life and scientific work of Ian Howard}, url = {http://www.perceptionweb.com/abstract.cgi?id=v1424500}, volume = {43 (Suppl.)}, year = {2014}}
@incollection{Guterman:2014fk, abstract = {Aubert's (1861, Arch Pathol Anat, 20: 381-393) finding that a vertical line is perceived as tilted in tilted observers (``A-effect'') was tested using moving stimuli. Observers judged the tilt of a line and of global motion while standing or lying on their side. Postural effects were consistent with the A-effect, and when lying down, shifts in the point of subjective equality were significantly smaller for motion (95\%CI: 2D = -11.49 +/- 5.86 deg., 3D = -17.08 +/- 4.77 deg.) than for the line (95\%CI: -23 +/- 4.76 deg.). Findings will be discussed in terms of their implications for sensory integration.}, annote = {24th Annual Meeting, Ryerson University, Toronto, Ontario, July 3-5, 2014}, author = {Guterman, P.S. and Allison, R. S.}, booktitle = {Canadian Society for Brain, Behaviour and Cognitive Science 24th Annual Meeting (CSBBCS), Canadian Journal of Experimental Psychology}, date-added = {2014-08-14 17:10:17 +0000}, date-modified = {2015-03-10 23:10:53 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {4}, pages = {306}, title = {Head orientation influences the perceived tilt of global motion}, volume = {68}, year = {2014}}
@incollection{Cutone:2014vn, abstract = {Stereoacuity for moving line targets presented on CRT displays is reportedly unaffected by lateral motion of up to 2.5 deg/s. Here we re-assess the effects of lateral retinal motion on stereoacuity using custom-built hardware that permits precise timing and movement of 'real' targets. Observers were asked to indicate the relative depth of two real, vertically aligned luminous lines. We varied motion velocity and exposure time; there was no effect of either on performance. We conclude that the observed resilience to retinal motion likely reflects the rapid acquisition of the disparity signal, not the properties of the display system.}, annote = {24th Annual Meeting, Ryerson University, Toronto, Ontario, July 3-5, 2014}, author = {Cutone, M and Wilcox, L. M. and Allison, R. S.}, booktitle = {Canadian Society for Brain, Behaviour and Cognitive Science 24th Annual Meeting (CSBBCS), Canadian Journal of Experimental Psychology}, date-added = {2014-08-14 17:10:17 +0000}, date-modified = {2015-09-03 17:25:19 +0000}, keywords = {Stereopsis}, number = {4}, pages = {306}, title = {Retinal Motion and Stereoacuity Revisited}, volume = {68}, year = {2014}}
@incollection{Guterman:2014sf, abstract = {Aubert's (1861, Arch Pathol Anat, 20: 381-393) finding that a vertical line is perceived as tilted in tilted observers demonstrated how percepts of verticality rely on the integration of multiple sensory systems. This phenomenon has been studied extensively using static stimuli. Global motion processing may play an important role in sensory integration, so here we follow up on our earlier report (VSS 2013) and explore whether this tilt occurs when viewing global motion displays. Observers stood and lay left side down while viewing a static line and random-dot displays of 2D (planar) or 3D (volumetric) global motion. For each posture and motion type, a forced-choice staircase procedure determined the tilt of the stimulus that appeared subjectively vertical (PSE). Consistent with Aubert's A-effect and our earlier results using the method of constant stimuli, shifts were significantly greater when lying on the side than standing, and in the direction of the head tilt. In the lying position, the PSE shift was significantly smaller for the global motion stimuli (95\%CI: 2D = -11.49 +/- 5.86 deg., 3D = -17.08 +/- 4.77 deg.) than for the line (95\%CI: -23 +/- 4.76 deg.). A control experiment using single and multiple line displays eliminated eccentricity and density as potential explanations for these differences. We will discuss these findings in terms of their implications for sensory integration and the mapping of spatial reference frames.}, annote = {Amsterdam, June 11-15, 2014}, author = {Guterman, P.S. and Allison, R. S.}, booktitle = {International Multisensory Research Forum}, date-added = {2014-08-14 17:01:35 +0000}, date-modified = {2014-08-14 17:01:35 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {112}, title = {Postural effects on the perceived tilt of a line and global motion}, year = {2014}}
@inproceedings{Ritvo:2014bh, abstract = {Despite a marked increase in the number of hardware and software systems being adapted and designed specifically for nonhuman animals, to date nearly all computer interaction design and assessment has been anthropocentric. Ironically, because nonhuman animals cannot provide, refuse, or withdraw consent to participate with ACI systems, valid and reliable evaluation of usability and user satisfaction is crucial. The current paper explores a) the potential benefits and costs of engaging in animal-computer interaction for nonhuman animal users, b) potential animal-computer interaction evaluation concerns, and c) the assessment of liking and preference in non-communicative subjects.}, annote = {Nov 14, 2014, Funchal, Madeira}, author = {Ritvo, S. E. and Allison, R. S.}, booktitle = {Proceedings of the 2014 Workshops on Advances in Computer Entertainment Conference: The First International Congress on Animal Human Computer Interaction}, date-added = {2014-10-19 02:40:19 +0000}, date-modified = {2015-10-02 20:35:10 +0000}, doi = {10.1145/2693787.2693795}, keywords = {Misc.}, month = {11}, organization = {{ACM}}, pages = {Article 4}, title = {Challenges Related to Nonhuman Animal-Computer Interaction: Usability and `Liking'}, url = {http://percept.eecs.yorku.ca/papers/AHCI-Paper-14-11-04.pdf}, year = {2014}, url-1 = {https://doi.org/10.1145/2693787.2693795}}
@inproceedings{Vinnikov:2014fk, abstract = {Computer-generated objects presented on a display typically have the same focal distance regardless of the monocular and binocular depth cues used to portray a 3D scene. This is because they are presented on a flat screen display that has a fixed physical location. In a stereoscopic 3D display, accommodation (focus) of the eyes should always be at the distance of the screen for clear vision regardless of the depth portrayed; this fixed accommodation conflicts with the vergence eye movements that the user must make to fuse stimuli located off the screen. This is known as accommodation-vergence conflict and is detrimental to the user experience of stereoscopic virtual environments (VE), as it can cause visual discomfort and diplopia during use of a stereoscopic display. It is believed that, by artificially simulating focal blur and natural accommodation, it is possible to compensate for the vergence-accommodation conflict and alleviate these symptoms. We hypothesized that it is possible to compensate for the conflict with a fixed accommodation cue by adding simulated focal blur according to instantaneous fixation. We examined gaze-contingent depth of field (DOF) when used in stereoscopic and non-stereoscopic 3D displays. We asked our participants to compare different conditions in terms of depth perception, image quality and viewing comfort. As expected, we found that monocular DOF gave a stronger impression of depth than no depth of field, and stereoscopic cues were stronger than any kind of monocular cues, but adding depth of field to stereo displays did not enhance depth impressions. The opposite was true for image comfort. People thought that DOF impaired image quality in monocular viewing. We also observed that comfort was affected by DOF and display mode in a similar fashion to image quality. However, the magnitude of the effects of DOF simulation on image quality depended on whether people associated image quality with depth or not. These results suggest that studies evaluating DOF effectiveness need to consider the type of task, type of image and questions asked.}, annote = {Safety Harbor, FL, USA. Mar 26-28, 2014}, author = {Vinnikov, M. and Allison, R. S.}, booktitle = {{ACM} Eye Tracking Research and Applications 2014}, date-added = {2013-11-11 19:30:47 +0000}, date-modified = {2014-09-26 02:21:19 +0000}, doi = {10.1145/2578153.2578170}, keywords = {Eye Movements & Tracking}, pages = {119-126}, title = {Gaze-Contingent Depth of Field in Realistic Scenes: The User Experience}, url = {https://percept.eecs.yorku.ca/papers/119-vinnikov.pdf}, year = {2014}, url-1 = {https://doi.org/10.1145/2578153.2578170}}
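A note on the simulated depth-of-field technique in the entry above: in gaze-contingent renderers of this kind, per-pixel blur magnitude is typically derived from thin-lens geometry. The sketch below is a minimal Python illustration of that calculation, not the system used in the paper; the pupil diameter, eye focal length, and distances are assumed placeholder values.

    def blur_circle_diameter(obj_dist, fix_dist, aperture=0.005, focal=0.017):
        """Thin-lens blur circle (metres on the retina) for an object at
        obj_dist while the eye is focused at fix_dist. Defaults are rough
        human-eye values (5 mm pupil, 17 mm focal length), for illustration.
        """
        m = focal / (fix_dist - focal)  # image magnification at the fixated distance
        return aperture * m * abs(obj_dist - fix_dist) / obj_dist

    # e.g. an object 1 m away while fixating at 0.5 m:
    # blur_circle_diameter(1.0, 0.5) -> about 9e-5 m of retinal blur

A gaze-contingent implementation would read fix_dist from the eye tracker each frame and blur each pixel with a kernel scaled by this diameter.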
@article{Howard:fk, abstract = {A stereoscope displays 2-D images with binocular disparities (stereograms), which fuse to form a 3-D stereoscopic object. But a stereoscopic object creates a conflict between vergence and accommodation. Also, motion in depth of a stereoscopic object simulated solely from change in target vergence produces anomalous motion parallax and anomalous changes in perspective. We describe a new instrument, which overcomes these problems. We call it the dichoptiscope. It resembles a mirror stereoscope, but instead of stereograms, it displays identical 2-D or 3-D physical objects to each eye. When a pair of the physical, monocular objects is fused, they create a dichoptic object that is visually identical to a real object. There is no conflict between vergence and accommodation, and motion parallax is normal. When the monocular objects move in real depth, the dichoptic object also moves in depth. The instrument allows the experimenter to control independently each of several cues to motion in depth. These cues include changes in the size of the images, changes in the vergence of the eyes, changes in binocular disparity within the moving object, and changes in the relative disparity between the moving object and a stationary object.}, author = {Howard, I. P. and Fukuda, K. and Allison, R. S.}, date-added = {2013-10-19 15:39:29 +0000}, date-modified = {2014-09-26 02:17:29 +0000}, doi = {10.1167/13.14.1}, journal = {Journal of Vision}, keywords = {Stereopsis, Motion in depth}, number = {14: Article 1}, pages = {1-11}, title = {The dichoptiscope: An instrument for investigating cues to motion in depth}, url = {http://www.journalofvision.org/content/13/14/1.full}, volume = {13}, year = {2013}, url-1 = {https://doi.org/10.1167/13.14.1}}
@article{Allison:uq, abstract = {Superimposed luminance noise is typical of imagery from devices used for low-light vision such as image intensifiers (i.e., night vision devices). In four experiments, we measured the ability to detect and discriminate motion-defined forms as a function of stimulus signal-to-noise ratio at a variety of stimulus speeds. For each trial, observers were shown a pair of image sequences - one containing dots in a central motion-defined target region that moved coherently against the surrounding dots, which moved in the opposite or in random directions, while the other sequence had the same random/uniform motion in both the center and surrounding parts. They indicated which interval contained the target stimulus in a two-interval forced-choice procedure. In the first experiment, simulated night vision images were presented with Poisson-distributed spatiotemporal image noise added to both the target and surrounding regions of the display. As the power of the spatiotemporal noise was increased, it became harder for observers to detect the target, particularly at the lowest and highest dot speeds. The second experiment confirmed that these effects also occurred with low illumination in real night vision device imagery, a situation that produces similar image noise. The third experiment demonstrated that these effects generalized to Gaussian noise distributions and noise created by spatiotemporal decorrelation. In the fourth experiment, we found similar speed-dependent effects of luminance noise for the discrimination (as opposed to detection) of the shape of a motion-defined form. The results are discussed in terms of physiological motion processing and the usability of enhanced vision displays under noisy conditions.}, author = {Allison, R. S. and Macuda, T. and Jennings, S.}, date-added = {2013-06-12 23:21:15 +0000}, date-modified = {2014-09-26 01:00:52 +0000}, doi = {10.1109/THMS.2013.2284911}, journal = {IEEE Transactions on Human-Machine Systems}, keywords = {Night Vision}, number = {6}, pages = {558-569}, title = {Detection and Discrimination of Motion-Defined Form in the Presence of Additive Noise: Implications for Motion Processing and Use of Night Vision Devices}, url = {http://percept.eecs.yorku.ca/papers/06645415.pdf}, volume = {43}, year = {2013}, url-1 = {https://doi.org/10.1109/THMS.2013.2284911}}
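The noise manipulation described in the entry above can be illustrated with a short NumPy sketch: superimpose Poisson-distributed luminance noise on a random-dot frame. This is an illustrative reconstruction, not the authors' stimulus code; the frame size, dot count, and noise scaling are assumed values.

    import numpy as np

    rng = np.random.default_rng(seed=1)

    def noisy_dot_frame(width=256, height=256, n_dots=200, noise_mean=8.0):
        """Binary random-dot frame with added Poisson luminance noise.
        Larger noise_mean lowers the effective signal-to-noise ratio.
        """
        frame = np.zeros((height, width))
        xs = rng.integers(0, width, n_dots)
        ys = rng.integers(0, height, n_dots)
        frame[ys, xs] = 1.0  # bright dots on a dark field
        noise = rng.poisson(noise_mean, frame.shape).astype(float)
        return np.clip(frame + noise / noise.max(), 0.0, 1.0)

Animating the target region's dots coherently across successive frames, while the surround moves randomly, reproduces the motion-defined form of the detection task.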
@article{Chen:2013uq, abstract = {Many materials, including water surfaces, jewels, and glassware, exhibit transparent refractions. The human visual system can somehow recover 3D shape from refracted images. While previous research has elucidated various visual cues that can facilitate visual perception of transparent objects, most of this work has focused on monocular material perception. The question of shape perception of transparent objects is much more complex, and few studies have been undertaken, particularly in terms of binocular vision. In this article, we first design a system for stereoscopic surface orientation estimation with photo-realistic stimuli. It displays pre-rendered stereoscopic images and a real-time S3D (Stereoscopic 3D) shape probe simultaneously. Then we estimate people's perception of the shape of thin transparent objects using a gauge figure task. Our results suggest that people can consistently perceive the surface orientation of thin transparent objects, and that stereoscopic viewing improves the precision of estimates. To explain the results, we present an edge-aware orientation map based on image gradients and structure tensors to illustrate the orientation information in images. We also decomposed the normal direction of the surface into azimuth angle and slant angle to explain why additional depth information can improve the accuracy of the perceived normal direction.}, annote = {presented at ACM SAP Dublin, Sept 2013}, author = {Chen, J. and Allison, R. S.}, date-added = {2013-06-12 23:20:52 +0000}, date-modified = {2014-09-26 02:17:06 +0000}, doi = {10.1145/2506206.2506208}, journal = {{ACM} Transactions on Applied Perception (TAP)}, keywords = {Stereopsis}, month = {08}, number = {3, Article 15}, pages = {1-15}, title = {Shape Perception of Thin Transparent Objects with Stereoscopic Viewing}, url = {http://percept.eecs.yorku.ca/papers/a15-chen(1).pdf}, volume = {10}, year = {2013}, url-1 = {https://doi.org/10.1145/2506206.2506208}}
@article{Allison:fk, abstract = {Stereoscopic film has long held an allure as the ultimate in fidelity for cinema and, as such, has been a goal for those seeking the most compelling illusion of reality. However, the fundamental and technical limitations of the medium introduce a number of artefacts and imperfections that affect viewer experience. The renaissance of stereoscopic three-dimensional (S3D) film requires that film-makers revisit assumptions and conventions about factors that influence the visual appreciation and impact of their medium. This paper discusses a variety of these issues from a perceptual standpoint and their implications for depth perception, visual comfort and sense of scale. The impact of these perceptual artefacts on the suspension of disbelief and the creation of alternate realities is discussed, as is their deliberate use when artistic considerations demand breaks with realism.}, author = {Allison, R. S. and Wilcox, L. M. and Kazimi, A.}, date-added = {2013-06-01 02:29:52 +0000}, date-modified = {2014-01-12 01:12:53 +0000}, doi = {10.1386/public.24.47.149_1}, journal = {Public}, keywords = {Stereopsis}, pages = {149-160}, title = {Perceptual artefacts, suspension of disbelief and realism in stereoscopic 3D film}, url = {http://www.ingentaconnect.com/content/intellect/public/2013/00000024/00000047/art00010}, url-1 = {https://percept.eecs.yorku.ca/papers/Public%20Journal.pdf}, url-2 = {https://doi.org/10.1386/public.24.47.149_1}, volume = {47 (Parallax: Stereoscopic 3D)}, year = {2013}}
@article{Ash:2013fk, abstract = {Vection has typically been induced in stationary observers (i.e., conditions providing visual-only information about self-motion). Two recent studies have examined vection during active treadmill walking---one reported that treadmill walking in the same direction as the visually simulated self-motion impaired vection (Onimaru et al, 2010 Journal of Vision 10(7):860), the other reported that it enhanced vection (Seno et al, 2011 Perception 40 747--750; Seno et al, 2011 Attention, Perception, & Psychophysics 73 1467--1476). Our study expands on these earlier investigations of vection during active observer movement. In experiment 1 we presented radially expanding optic flow and compared the vection produced in stationary observers with that produced during walking forward on a treadmill at a `matched' speed. Experiment 2 compared the vection induced by forward treadmill walking while viewing expanding or contracting optic flow with that induced by viewing playbacks of these same displays while stationary. In both experiments subjects' tracked head movements were either incorporated into the self-motion displays (as simulated viewpoint jitter) or simply ignored. We found that treadmill walking always reduced vection (compared with stationary viewing conditions) and that simulated viewpoint jitter always increased vection (compared with constant velocity displays). These findings suggest that while consistent visual--vestibular information about self-acceleration increases vection, biomechanical self-motion information reduces this experience (irrespective of whether it is consistent or not with the visual input).}, author = {April Ash and Stephen Palmisano and Deborah Apthorp and Robert S. Allison}, date-added = {2013-05-02 10:56:15 +0000}, date-modified = {2014-09-26 02:17:48 +0000}, doi = {10.1068/p7449}, journal = {Perception}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {562-576}, title = {Vection in depth during treadmill walking}, url = {http://percept.eecs.yorku.ca/papers/ash-treadmill.pdf}, volume = {42}, year = {2013}, url-1 = {https://doi.org/10.1068/p7449}}
@article{Rushton:2020fj, abstract = {We describe simple heuristics, based on perceptual variables, that produce human-like trajectories towards moving and stationary targets, and around moving and stationary obstacles. Interception of moving and stationary objects can be achieved through regulation of self-movement to maintain a target at a constant eccentricity, or by cancelling the change (drift) in the eccentricity of the target. We first show how a constant eccentricity strategy can be extended to home in on optimal paths and avoid obstacles. We then identify a simple visual speed ratio that signals a future collision, and the change in path needed for avoidance. The combination of heuristics based on eccentricity and the speed ratio produces human-like behaviour. The heuristics can be used to animate avatars in virtual environments or to guide mobile robots. Combined with higher-level goal setting and way-finding behaviours, such navigation heuristics could provide the foundation for generative models of natural human locomotion.}, author = {Rushton, S.K. and Allison, R.S.}, date-added = {2012-10-13 00:14:48 +0000}, date-modified = {2014-09-26 01:20:57 +0000}, doi = {10.1016/j.displa.2012.10.006}, journal = {Displays}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {2}, pages = {105-113}, title = {Biologically-inspired heuristics for human-like walking trajectories toward targets and around obstacles}, url = {http://percept.eecs.yorku.ca/papers/displays.pdf}, volume = {34}, year = {2013}, url-1 = {https://doi.org/10.1016/j.displa.2012.10.006}}
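The constant-eccentricity / drift-nulling idea summarized above lends itself to a compact sketch. The Python fragment below is an illustrative reconstruction of that class of heuristic, not the authors' published controller; the gains, walking speed, and timestep are assumed values.

    import math

    def steer_step(x, y, heading, tx, ty, prev_ecc=None,
                   speed=1.4, k_drift=4.0, k_ecc=1.0, dt=0.05):
        """One update of a drift-nulling steering heuristic.
        Eccentricity is the angle between the heading and the target
        direction; turning to cancel its drift (plus a small homing
        term) yields smooth approach trajectories.
        """
        ecc = math.atan2(ty - y, tx - x) - heading
        ecc = math.atan2(math.sin(ecc), math.cos(ecc))  # wrap to [-pi, pi]
        drift = 0.0 if prev_ecc is None else (ecc - prev_ecc) / dt
        heading += (k_drift * drift + k_ecc * ecc) * dt
        x += speed * math.cos(heading) * dt
        y += speed * math.sin(heading) * dt
        return x, y, heading, ecc

Iterating steer_step, feeding the returned ecc back in as prev_ecc, traces a walking path that homes in on the target (tx, ty).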
@incollection{Guterman:2013uq, author = {Guterman, P.S. and Allison, R. S. and Zacher, J. E.}, booktitle = {CVR 2013: Interactions in Vision}, date-added = {2013-10-04 11:00:04 +0000}, date-modified = {2013-10-04 11:00:04 +0000}, howpublished = {CVR, June 26-28, 2013}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {Effects of head orientation on the perceived tilt of a line and global motion}, year = {2013}}
@incollection{Vinnikov:2013fk, abstract = {Purpose: Age-related macular degeneration (AMD) is a leading cause of blindness in ageing populations across the world. The symptoms associated with later stages of the disease are sizeable blind spots (scotomas) in the central visual field, which significantly impact all aspects of everyday life (including driving and navigation). In contrast, the early symptoms of the disease include gradual reduction in acuity and visual distortion in the affected areas, also known as metamorphopsia. There is limited research on the functional consequences of symptoms in the early stages of the disease. Methods: We examined the effects of the following simulated macular degeneration symptoms on gaze behavior and steering performance: (i) horizontal distortions, (ii) Gaussian (both horizontal and vertical) distortions, (iii) central scotomas, and (iv) an unimpaired vision condition. To ensure repeatability, we studied healthy participants and used a gaze-contingent display paradigm to simulate these visual deficiencies in real time. Driving was simulated at different speeds on two-lane curving rural roads with various layouts. Results: We predicted that gaze and driving performance would be more similar for the visual distortion and scotoma conditions than for conditions with no simulated visual deficiency. As expected, several deficits in driver performance were observed during simulated macular degeneration conditions. While gaze was reliably directed to nearer scene features during the Gaussian distortion and scotoma trials (compared to unimpaired trials), variability in lateral gaze did not differ (suggesting that information from the peripheral visual field was used to compensate for information that would have normally been available from the central visual field). Based on past findings, we also expected people to direct their gaze more towards the inner side of the curve. However, on a significant number of turns, we observed that people often preferred to look at the outer curve instead (e.g. on average, this occurred about 5\% more often in macular degeneration trials than in unimpaired trials). Conclusions: Simulated symptoms of early-stage macular degeneration impacted steering and gaze behaviour. We are currently looking at gaze pattern signatures for each condition and correlating these with driving performance. In future research we would like to examine collision avoidance strategies associated with different stages of the disease.}, annote = {Dearborn MI Sept 16-18, 2013}, author = {Vinnikov, M. and Palmisano, S.A. and Allison, R. S.}, booktitle = {The Eye, the Brain and the Auto, Detroit Michigan Sept 2013}, date-added = {2013-10-04 10:59:33 +0000}, date-modified = {2013-10-09 14:06:56 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {Steering with Simulated Symptoms of Age-related Macular Degeneration}, year = {2013}}
@incollection{guterman_effects_2013, abstract = {When the head is tilted, an objectively vertical (or horizontal) line is typically perceived as tilted. We explored whether this shift occurs when viewing {3D} global motion displays. Global motion is processed, in part, in cortical area {MST}, which is believed to be involved in multisensory integration and may facilitate the mapping of spatial reference frames. Thus, we hypothesized that observers may be less susceptible to these biases for global motion compared to line displays. Observers stood, and lay left and right side down, while viewing a static line or random-dot {3D} global motion display. The line and motion direction were tilted $0^{\circ}$, $\pm 5^{\circ}$, $\pm 10^{\circ}$, $\pm 15^{\circ}$, $\pm 20^{\circ}$, and $\pm 25^{\circ}$ from the gravitational vertical, and in a separate block tilted from the horizontal. After each trial, observers indicated with a button press whether the tilt was clockwise or counterclockwise from the perceived vertical or horizontal. Psychometric functions were fit to the data and shifts in the point of subjective equality ({PSE}) were measured. These shifts were greater when lying on the side than standing, and were biased in the direction of the head tilt, consistent with the so-called A-effect. However, contrary to an earlier study by De Vrijer, Medendorp, and Van Gisbergen (2008, J Neurophysiol, 99: 915--930) that found similar {PSE} shifts for lines and {2D} planar motion, we found significantly larger shifts for the static line than for {3D} global motion. There was no appreciable difference between the shift magnitudes in the tilt-from-vertical and tilt-from-horizontal conditions. Furthermore, the direction of motion (up/down, left/right) had no significant influence on the {PSE}. The results will be discussed in terms of the sensory integration of motion information in cortical areas. Meeting abstract presented at {VSS} 2013}, author = {Guterman, Pearl S. and Allison, Robert S. and Zacher, James E.}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-added = {2013-07-26 12:34:31 +0000}, date-modified = {2019-02-03 09:34:05 -0500}, doi = {10.1167/13.9.874}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, language = {en}, month = 07, number = {9}, pages = {874}, title = {Effects of head orientation on the perceived tilt of a static line and {3D} global motion}, url = {http://www.journalofvision.org/content/13/9/874}, volume = {13}, year = {2013}, url-1 = {https://doi.org/10.1167/13.9.874}}
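Several entries here report PSE shifts obtained from psychometric fits. As background, this is a minimal sketch of that analysis step (with fabricated data, not the study's results): fit a cumulative Gaussian to the proportion of "clockwise" responses and read the PSE off as its 50% point.

    import numpy as np
    from scipy.optimize import curve_fit
    from scipy.stats import norm

    # Hypothetical tilt levels (deg) and proportion of "clockwise" responses.
    tilts = np.array([-25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25], float)
    p_cw = np.array([0.02, 0.05, 0.10, 0.22, 0.40, 0.55,
                     0.71, 0.86, 0.93, 0.97, 0.99])

    def psychometric(x, pse, sigma):
        """Cumulative Gaussian: P(clockwise) as a function of stimulus tilt."""
        return norm.cdf(x, loc=pse, scale=sigma)

    (pse, sigma), _ = curve_fit(psychometric, tilts, p_cw, p0=(0.0, 5.0))
    print(f"PSE = {pse:.2f} deg, sigma = {sigma:.2f} deg")

A shift of the fitted PSE between postures (standing versus lying) is the quantity compared across conditions in these abstracts.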
@incollection{tsirlin_combining_2013, abstract = {The three-dimensional structure of the world can be reconstructed using the differences, or binocular disparities, between the positions and appearance of the images of the same objects on the two retinae. Occlusion of one object by another gives rise to areas visible only in one eye, called monocular occlusions, for which binocular disparities cannot be computed. Nevertheless, monocular occlusions can be perceived at precise locations in depth and can even induce the perception of illusory occluding surfaces. Since a growing body of literature has shown that monocular occlusions are an integral part of stereoscopic depth perception, it is important that we understand the mechanisms of depth extraction from monocular occlusions. Psychophysical experiments suggest that the visual system is able to assign depth from monocularly occluded areas based on the constraints imposed by the viewing geometry. However, none of the existing models of stereopsis use viewing geometry as the primary mechanism for quantitative and qualitative depth extraction in occluded areas. Moreover, no model has been shown to recover depth and structure of illusory occluding surfaces induced by the presence of monocular regions. We propose a new model of depth perception from disparity and monocular occlusions in which monocularly occluded areas are detected explicitly and quantitative depth from occlusions is calculated based on occlusion geometry. The model represents several levels of processing in the visual cortex and includes complex interactions between disparity and monocular occlusion detectors. It successfully reconstructs depth in a large range of stimuli including random-dot stereograms, illusory occluder stimuli, da Vinci arrangements and natural images. Thus we demonstrate that a dedicated set of mechanisms for processing of monocular occlusions combined with classical disparity detectors can underpin a wide range of stereoscopic percepts. Meeting abstract presented at {VSS} 2013}, author = {Tsirlin, Inna and Wilcox, Laurie M. and Allison, Robert S.}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-added = {2013-07-26 12:10:04 +0000}, date-modified = {2013-07-26 12:10:45 +0000}, doi = {10.1167/13.9.1177}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Stereopsis}, language = {en}, month = 07, number = {9}, pages = {1177}, shorttitle = {Combining occlusion and disparity information}, title = {Combining occlusion and disparity information: a computational model of stereoscopic depth perception}, url = {http://www.journalofvision.org/content/13/9/1177}, volume = {13}, year = {2013}, url-1 = {https://doi.org/10.1167/13.9.1177}}
@incollection{lugtigheid_are_2013, abstract = {Accurate information about three-dimensional ({3D}) motion is essential for interception. Being able to detect changes in the speed of motion is potentially important, as approaching objects are unlikely to maintain constant velocity either by intent, or because of the force of gravity or friction. However, evidence from the interception literature shows that acceleration is not taken into account when judging time-to-contact from looming (i.e. retinal expansion). These data may reflect a curious insensitivity to {3D} acceleration, a possibility that has received little empirical attention. As a first step towards a better understanding of this apparent lack of sensitivity, we assessed discrimination thresholds for {3D} velocity changes. Observers viewed animations of an approaching object undergoing an increase (acceleration) or decrease (deceleration) in its simulated approach speed over the trial. The stimulus was a thin outline disk that was viewed monocularly, such that looming was the only available cue to motion in depth. On each trial, observers discriminated acceleration sign. We measured psychometric functions for three interleaved average speeds. To discourage observers from using non-relevant cues (e.g. due to regularities in the stimulus and correlations between variables) we randomized the simulated starting and ending distance. Our results show that observers were able to detect acceleration in depth, but their thresholds were very high (about a 25-33\% velocity change). While precision did not depend on average velocity, there was a velocity-dependent bias: observers were more likely to report that the object accelerated for higher average approach speeds and vice versa. Thus, observers were sensitive to the acceleration of an approaching object under minimal cue conditions, but they could not completely dissociate speed and acceleration. We will discuss which signals could support monocular discrimination of {3D} acceleration and produce the bias we found. Furthermore, we will extend these experiments to consider stereoscopic {3D} acceleration. Meeting abstract presented at {VSS} 2013}, author = {Lugtigheid, Arthur J. and Allison, Robert S. and Wilcox, Laurie M.}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-added = {2013-07-26 12:10:04 +0000}, date-modified = {2013-12-13 01:29:27 +0000}, doi = {10.1167/13.9.970}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Stereopsis, Motion in depth}, language = {en}, month = 07, number = {9}, pages = {970}, title = {Are we blind to three-dimensional acceleration?}, url = {http://www.journalofvision.org/content/13/9/970}, volume = {13}, year = {2013}, url-1 = {https://doi.org/10.1167/13.9.970}}
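For readers unfamiliar with judging approach from looming, the quantity underlying such time-to-contact judgements is the classic tau ratio: angular size divided by its rate of expansion. A small illustrative sketch follows (assumed geometry, constant approach speed; not the study's stimulus code):

    import math

    def tau_from_looming(size, distance, speed):
        """Time-to-contact estimate from optical expansion alone.
        theta = 2*atan(size / (2*distance)) is the angular size; for
        approach at constant speed, tau = theta / theta_dot, which is
        close to distance / speed when the angles are small.
        """
        theta = 2.0 * math.atan2(size / 2.0, distance)
        theta_dot = speed * size / (distance**2 + (size / 2.0)**2)
        return theta / theta_dot

Acceleration detection, the topic of the abstract, amounts to sensing changes in this expansion pattern over time, which the reported thresholds suggest observers do only coarsely.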
@incollection{kirollos_perception_2013, abstract = {Adaptation to the microgravity environment and readaptation to the 1-g environment requires recalibration of the visual and vestibular signals. Previous research on the perception of visually stimulated self-motion (vection) in 1-g environments has shown that adding simulated viewpoint oscillation enhances the illusion of self-motion. However, the role simulated oscillation plays in vection in relation to adaptation to gravity remains unclear. The goal of this experiment was to understand how simulated viewpoint oscillation changes the subjective feeling of vection in microgravity compared to 1-g. This was done by measuring participants' sensation of vection before, during, and after parabolic flight. Eight participants viewed twenty-second clips displayed on a thirteen-inch laptop equipped with a hood and shroud aboard the aircraft. The clips simulated vection in the radial, oscillation or jitter motion conditions and were presented during the microgravity periods of the six parabolas of a flight. Participants were asked to rate their feeling of self-motion after each clip presentation. Onset of vection and vection duration were also measured by pressing a button on a gamepad during vection. The oscillation enhancement of vection was reduced in microgravity, and a small overall reduction in vection sensitivity was observed post-flight. A supplementary ground experiment demonstrated that vection did not vary significantly over multiple testing sessions and that the oscillation effect persisted as previously reported in the literature. These findings: (i) demonstrate that the oscillation advantage for vection is very stable and repeatable during 1-g conditions and (ii) imply that adaptation or conditioned responses played a role in the post-flight vection reductions. The effects observed in microgravity are discussed in terms of the ecology of terrestrial locomotion and the nature of movement in microgravity.}, author = {Kirollos, Ramy and Allison, Robert and Zacher, James and Guterman, Pearl S. and Palmisano, Stephen}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-added = {2013-07-26 12:02:05 +0000}, date-modified = {2013-07-26 12:08:27 +0000}, doi = {10.1167/13.9.702}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, language = {en}, number = {9}, pages = {702}, title = {Perception of smooth and perturbed vection in short-duration microgravity}, url = {http://www.journalofvision.org/content/13/9/702}, volume = {13}, year = {2013}, url-1 = {https://doi.org/10.1167/13.9.702}}
@incollection{Allison:2013fk, abstract = {Compelling illusions of self-motion, known as vection, can be produced in a stationary observer by visual stimulation alone. The role of binocular vision and stereopsis in these illusions was explored in a series of three experiments. Linear vertical vection was produced by upward or downward translation of stereoscopic surfaces. The surfaces were horizontally-oriented depth corrugations produced by disparity modulation of patterns of persistent or short-lifetime dot elements. The experiments demonstrate an increase in vection magnitude and a decrease in vection latency with binocular viewing. Experiments utilising short-lifetime dot stereograms demonstrated that this binocular enhancement was due to the motion of stereoscopically defined features.}, annote = {July 5th-8th, 2013. Suzhou, China}, author = {Allison, R. S. and Ash, A. and Palmisano, S. A.}, booktitle = {The 9th Asia-Pacific Conference on Vision (APCV 2013). PsyCh}, date-added = {2013-06-12 23:23:43 +0000}, date-modified = {2014-02-09 01:36:20 +0000}, doi = {10.1002/pchj.33}, journal = {PsyCh Journal}, keywords = {Stereopsis, Optic flow & Self Motion (also Locomotion & Aviation)}, month = {06}, pages = {48}, title = {Binocular contributions to linear vection}, url = {http://www.apcv2013.org/images/APCV-program-full-version.pdf}, volume = {2 (Suppl 1)}, year = {2013}, url-1 = {https://doi.org/10.1002/pchj.33}}
@incollection{Palmisano:2013fk, abstract = {Traditionally, vection studies have induced visual illusions of self-motion in physically stationary observers. Recently, two studies examined vection during treadmill walking. While one study found that treadmill walking in the same direction as the simulated self-motion impaired vection (Onimaru et al. 2010), the other found that this same situation enhanced vection (Seno et al. 2011). This study expands on these earlier investigations of active vection. Our subjects viewed radial optic flow (simulating forwards/backwards self-motion) while (a) walking forward on a treadmill at a matched speed, (b) walking on the spot or (c) standing still. On half the trials, the subject's head-tracked physical head movements were updated directly into the self-motion display, producing simulated viewpoint jitter. On the remainder, subjects viewed non-jittering optic flow (as in the two earlier studies). We found an overall reduction in the vection induced for all three walking conditions (consistent and inconsistent treadmill walking, as well as walking on the spot) compared to the stationary viewing condition. However, the addition of consistent simulated viewpoint oscillation to the self-motion display always improved vection (in both walking and stationary conditions alike). These findings suggest that complex multisensory interactions are involved in the perception of self-motion.}, author = {Palmisano, S. A. and Ash, A. and Govan, D. G. and Allison, R. S.}, booktitle = {40th Australasian Experimental Psychology Conference, April 3-6, 2013, Adelaide, Australia}, date-added = {2013-04-02 23:40:16 +0000}, date-modified = {2013-04-02 23:45:07 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {61}, title = {Vection during treadmill walking, walking on the spot and standing still}, url-1 = {https://www.adelaide.edu.au/epc2013/abstracts/120.html}, year = {2013}}
@inproceedings{Benzeroual:2013fk, abstract = {Mass-market stereoscopic 3D gaming has recently become a reality on both gaming consoles and PCs. At the same time, the success of devices such as the Nintendo Wii, Nintendo Wii Balance Board, Sony Move and Microsoft Kinect has made active movement of the head, limbs and body a key means of interaction in many games. We hypothesized that players may be more prone to cybersickness symptoms in stereoscopic 3D games based on active movement compared to similar games played with controllers or other devices, which do not require physical movement of the body with the exception of the hands and fingers. Two experimental games were developed to test this hypothesis while keeping other parameters as constant as possible. For the disorientation and oculomotor cybersickness subscales and the overall score of the Simulator Sickness Questionnaire, a significant interaction between display mode (S3D versus non-stereoscopic) and motion sickness susceptibility was found. However, contrary to our hypothesis, there was no indication that participants were particularly susceptible to cybersickness in S3D motion-controller games.}, annote = {Dec 2013, Liege Belgium}, author = {Benzeroual, K. and Allison, R. S.}, booktitle = {{IEEE} International Conference on 3D Imaging (IC3D)}, date-added = {2013-11-11 19:30:47 +0000}, date-modified = {2014-09-26 02:00:48 +0000}, doi = {10.1109/IC3D.2013.6732090}, keywords = {Stereopsis, Optic flow & Self Motion (also Locomotion & Aviation)}, month = {December 3-5}, pages = {1-7}, publisher = {{IEEE}}, title = {Cyber (Motion) Sickness in Active Stereoscopic 3D Gaming}, url = {http://percept.eecs.yorku.ca/papers/cybersickness.pdf}, year = {2013}, url-1 = {https://doi.org/10.1109/IC3D.2013.6732090}}
@inproceedings{Chen:2013fk, abstract = {Stereoscopic 3D film production has increased the need for efficient and robust camera calibration and tracking. Many of these tasks involve establishing planar correspondences, so accurate and fast homography estimation is essential. However, homography estimation may fail with distorted images, since the projected corners of a plane may be displaced far from their ``perfect'' locations. On the other hand, precisely estimating lens distortion from a single image is still a challenge, especially in real-time applications. In this paper, we drop the assumption that image distortion is negligible in homography estimation. We propose robust homography as a simple and efficient approach which combines homography mapping and image distortion estimation in a least-squares constraint. Our method can simultaneously estimate homography and image distortion from a single image in real time. Compared with previous methods, it has two advantages: first, un-distortion can be achieved with little overhead, because only a single calibration image is needed and the homography mapping of easy-to-track corners runs in real time; second, due to the use of precise calibration targets, the accuracy of our method is comparable to multiple-image calibration methods. In an experimental evaluation, we show that our method can accurately estimate image distortion parameters in both synthetic and real images. We also present its applications in close-range un-distortion and robust corner detection.}, annote = {Liege Belgium Dec 3-5, 2013}, author = {Chen, J. and Benzeroual, K. and Allison, R. S.}, booktitle = {{IEEE} International Conference on 3D Imaging (IC3D)}, date-added = {2013-11-11 19:30:47 +0000}, date-modified = {2014-09-26 02:21:37 +0000}, doi = {10.1109/IC3D.2013.6732075}, keywords = {Misc.}, month = {12}, pages = {1-8}, publisher = {{IEEE}}, title = {Robust Homography for Real-time Image Un-distortion}, year = {2013}, url-1 = {http://percept.eecs.yorku.ca/papers/robust%20homography.pdf}, url-2 = {https://doi.org/10.1109/IC3D.2013.6732075}}
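As background to the homography estimation discussed above, the standard direct linear transform (DLT) can be written in a few lines of NumPy. This sketch shows plain homography estimation only; it omits the paper's joint lens-distortion term, and the example point sets are made up.

    import numpy as np

    def estimate_homography(src, dst):
        """DLT estimate of the 3x3 homography H mapping src to dst
        (each an (N, 2) float array, N >= 4), solved via SVD of the
        standard 2N x 9 homogeneous system A h = 0.
        """
        rows = []
        for (x, y), (u, v) in zip(src, dst):
            rows.append([-x, -y, -1, 0, 0, 0, u * x, u * y, u])
            rows.append([0, 0, 0, -x, -y, -1, v * x, v * y, v])
        _, _, vt = np.linalg.svd(np.asarray(rows, float))
        H = vt[-1].reshape(3, 3)
        return H / H[2, 2]

    src = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], float)
    dst = np.array([[2.0, 1.0], [4.0, 1.2], [4.1, 3.0], [1.9, 2.8]], float)
    H = estimate_homography(src, dst)

The paper's contribution is to fold a distortion model into this kind of least-squares formulation so that the homography and the distortion parameters can be recovered together from a single image.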
@inproceedings{Allison:2013uq, abstract = {Vergence eye movements are a key factor in stereoscopic depth perception. In this brief review I will outline work at York University on select aspects of the relation between vergence and stereopsis, sensory fusion and depth constancy.}, annote = {Kyoto, Japan, Sept 12-13, 2013}, author = {Allison, R. S.}, booktitle = {Proceedings of the IEICE-Human Information Processing technical committee conference, Kyoto, Japan, Sept 12-13, 2013}, date-added = {2013-08-22 07:58:10 +0000}, date-modified = {2013-10-09 14:21:28 +0000}, keywords = {Eye Movements & Tracking}, organization = {Technical Report of IEICE}, pages = {29-34}, title = {[Invited Talk] The perceptual consequences of vergence eye movements: A brief review}, url = {http://www.eecs.yorku.ca/percept/papers/IEICE%20Kyoto%20paper.pdf}, volume = {HIP2013-55}, year = {2013}}
@inproceedings{Deas:2013kx, abstract = {The perception of synchronous, intelligible speech is fundamental to a high-quality modern cinema experience. Surprisingly, this issue has remained relatively unexplored in stereoscopic 3D (S3D) media, despite its increasing popularity. Instead, visual parameters have been the primary focus of concern for those who create, and those who study the impact of, S3D content. In the work presented here we ask if the ability to integrate audio and visual information is influenced by adding the third dimension to film. We also investigate the effects of known visual parameters (horizontal and vertical parallax) on audio-visual integration. To this end, we use an illusion of speech processing known as the McGurk effect as an objective measure of multi-modal integration. In the classic (2D) version of this phenomenon, discrepant auditory (/ba/) and visual (/ga/) information typically results in the perception of a unique `fusion' syllable (e.g. /da/). We extended this paradigm to measure the McGurk effect in a small theatre. We varied the horizontal (IA: 0, 6, 12, 18, 24 mm) and vertical (0, 0.5, 0.75, 1 deg) parallax from trial to trial and asked observers to report their percept of the phoneme. Our results show a consistently high proportion of the expected fusion responses, with no effect of horizontal or vertical offsets. These data are the first to show that the McGurk effect extends to stereoscopic stimuli and is not a phenomenon isolated to 2D media perception. Furthermore, the results show that audiences can tolerate a high level of both horizontal and vertical disparity and maintain veridical speech perception. We consider these results in terms of current stereoscopic filmmaking recommendations and practices.}, annote = {Dublin, Sept 2013}, author = {Deas, L. and Wilcox, L. M. and Kazimi, A. and Allison, R. S.}, booktitle = {Proceedings of the ACM Symposium on Applied Perception, Dublin, Ireland}, date-added = {2013-06-12 23:20:52 +0000}, date-modified = {2019-02-03 09:36:53 -0500}, doi = {10.1145/2492494.2492506}, keywords = {Stereopsis}, month = {09}, pages = {83-89}, title = {Audio-Visual Integration in Stereoscopic 3D}, url = {http://percept.eecs.yorku.ca/papers/p83-deas.pdf}, year = {2013}, url-1 = {https://doi.org/10.1145/2492494.2492506}}
@inproceedings{Vinnikov2013chi, address = {Paris, France}, author = {Vinnikov, M. and Allison, R. S.}, booktitle = {{ACM CHI} 2013 Workshop Gaze Interaction in the Post-{WIMP} World}, date-added = {2013-02-11 00:00:00 -0400}, date-modified = {2016-01-03 03:24:16 +0000}, keywords = {Eye Movements & Tracking}, month = {04}, pages = {VA13, 1-4}, title = {Gaze-Contingent Simulations of Visual Defects in Virtual Environment: Challenges and Limitations}, url = {http://gaze-interaction.net/wp-system/wp-content/uploads/2013/04/VA13.pdf}, year = {2013}}
@article{harris_relative_2012, abstract = {When illusory self-motion is induced in a stationary observer by optic flow, the perceived distance traveled is generally overestimated relative to the distance of a remembered target (Redlick, Harris, and Jenkin, 2001): subjects feel they have gone further than the simulated distance and indicate that they have arrived at a target's previously seen location too early. In this article we assess how the radial and laminar components of translational optic flow contribute to the perceived distance traveled. Subjects monocularly viewed a target presented in a virtual hallway wallpapered with stripes that periodically changed color to prevent tracking. The target was then extinguished and the visible area of the hallway shrunk to an oval region 40 deg(h) x 24 deg(v). Subjects either continued to look centrally or shifted their gaze eccentrically, thus varying the relative amounts of radial and laminar flow visible. They were then presented with visual motion compatible with moving down the hallway toward the target and pressed a button when they perceived that they had reached the target's remembered position. Data were modeled by the output of a leaky spatial integrator (Lappe, Jenkin, and Harris, 2007). The sensory gain varied systematically with viewing eccentricity while the leak constant was independent of viewing eccentricity. Results were modeled as the linear sum of separate mechanisms sensitive to radial and laminar optic flow. Results are compatible with independent channels for processing the radial and laminar flow components of optic flow that add linearly to produce large but predictable errors in perceived distance traveled.}, author = {Harris, Laurence R. and Herpers, Rainer and Jenkin, Michael and Allison, Robert S. and Jenkin, Heather and Kapralos, Bill and Scherfgen, David and Felsner, Sandra}, date-added = {2013-02-07 01:40:26 +0000}, date-modified = {2013-02-07 01:40:26 +0000}, doi = {10.1167/12.10.7}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, language = {en}, month = 09, number = {10}, title = {The relative contributions of radial and laminar optic flow to the perception of linear self-motion}, url = {http://www.journalofvision.org/content/12/10/7}, volume = {12}, year = {2012}, url-1 = {https://doi.org/10.1167/12.10.7}}
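The leaky spatial integrator cited in this entry (Lappe, Jenkin, and Harris, 2007) can be sketched compactly. The fragment below is an illustrative reconstruction of the general model form, dD/dx = gain - leak * D integrated over travelled distance, with made-up parameter values rather than the fitted ones:

    import numpy as np

    def perceived_distance(x_total, gain=0.9, leak=0.05, dx=0.01):
        """Integrate dD/dx = gain - leak * D over travelled distance x.
        gain scales how strongly optic flow drives the running distance
        estimate; a nonzero leak makes the estimate saturate on long
        paths, producing systematic errors in perceived distance.
        """
        d = 0.0
        for _ in np.arange(0.0, x_total, dx):
            d += (gain - leak * d) * dx
        return d

Fitting the gain separately for each viewing eccentricity, with a common leak constant, corresponds to the modelling result described in the abstract.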
@article{Tsirlin01112012, abstract = {In conventional stereopsis, the depth between two objects is computed based on the retinal disparity in the position of matching points in the two eyes. When an object is occluded by another object in the scene, so that it is visible only in one eye, its retinal disparity cannot be computed. Nakayama and Shimojo (1990) found that a percept of quantitative depth between the two objects could still be established for such stimuli and proposed that this percept is based on the constraints imposed by occlusion geometry. They named this and other occlusion-based depth phenomena ``da Vinci stereopsis.'' Subsequent research found quantitative depth based on occlusion geometry in several other classes of stimuli grouped under the term da Vinci stereopsis. However, Nakayama and Shimojo's findings were later brought into question by Gillam, Cook, and Blackburn (2003), who suggested that quantitative depth in their stimuli was perceived based on conventional disparity. In order to understand whether da Vinci stereopsis relies on one type of mechanism or whether its function is stimulus dependent we examine the nature and source of depth in the class of stimuli used by Nakayama and Shimojo (1990). We use three different psychophysical and computational methods to show that the most likely source for depth in these stimuli is occlusion geometry. Based on these experiments and previous data we discuss the potential mechanisms responsible for processing depth from monocular features in da Vinci stereopsis.}, author = {Tsirlin, Inna and Wilcox, Laurie M. and Allison, Robert S.}, date-added = {2012-11-04 23:57:04 +0000}, date-modified = {2012-11-04 23:57:19 +0000}, doi = {10.1167/12.12.2}, eprint = {http://www.journalofvision.org/content/12/12/2.full.pdf+html}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {12}, title = {Da Vinci decoded: Does da Vinci stereopsis rely on disparity?}, url = {http://www.journalofvision.org/content/12/12/2.abstract}, volume = {12}, year = {2012}, url-1 = {https://doi.org/10.1167/12.12.2}}
@article{Allison:2012uq, abstract = {Successful adaptation to the microgravity environment of space and readaptation to gravity on earth requires recalibration of visual and vestibular signals. Recently, we have shown that adding simulated viewpoint oscillation to visual self-motion displays produces more compelling vection (despite the expected increase in visual-vestibular conflict experienced by stationary observers). Currently, it is unclear what role adaptation to gravity might play in this oscillation-based vection advantage. The vection elicited by optic flow displays simulating either smooth forward motion or forward motion perturbed by viewpoint oscillation was assessed before, during and after microgravity exposure in parabolic flight. During normal 1-g conditions subjects experienced significantly stronger vection for oscillating compared to smooth radial optic flow. The magnitude of this oscillation enhancement was reduced during short-term microgravity exposure, more so for simulated interaural (as opposed to spinal) axis viewpoint oscillation. We also noted a small overall reduction in vection sensitivity post-flight. A supplementary experiment found that 1-g vection responses did not vary significantly across multiple testing sessions. These findings: (i) demonstrate that the oscillation advantage for vection is very stable and repeatable during 1-g conditions and (ii) imply that adaptation or conditioned responses played a role in the post-flight vection reductions. The effects observed in microgravity are discussed in terms of the ecology of terrestrial locomotion and the nature of movement in microgravity.}, author = {Allison, R.S. and Zacher, J. E. and Kirollos, R. and Guterman, P.S. and Palmisano, S.A.}, date-added = {2012-09-13 03:57:33 +0000}, date-modified = {2014-09-26 01:48:02 +0000}, doi = {10.1007/s00221-012-3275-5}, journal = {Experimental Brain Research}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {4}, pages = {479-487}, title = {Perception of smooth and perturbed vection in short-duration microgravity}, url = {http://percept.eecs.yorku.ca/papers/microg%20preprint%202012.pdf}, volume = {223}, year = {2012}, url-1 = {http://percept.eecs.yorku.ca/papers/microg%20preprint%202012.pdf}, url-2 = {https://doi.org/10.1007/s00221-012-3275-5}}
@article{Guterman:fk, abstract = {Sensory conflict theories predict that adding simulated viewpoint oscillation to self-motion displays should generate significant and sustained visual-vestibular conflict and reduce the likelihood of illusory self-motion (vection). However, research shows that viewpoint oscillation enhances vection in upright observers. This study examined whether the oscillation advantage for vection depends on head orientation with respect to gravity. Displays that simulated forward/backward self-motion with/without horizontal and vertical viewpoint oscillation were presented to observers in upright (seated and standing) and lying (supine, prone, and left side down) body postures. Viewpoint oscillation was found to enhance vection for all of the body postures tested. Vection also tended to be stronger in upright postures than in lying postures. Changing the orientation of the head with respect to gravity was expected to alter the degree/saliency of the sensory conflict, which may explain the overall posture-based differences in vection strength. However, this does not explain why the oscillation advantage for vection persisted for all postures. Thus, the current postural and oscillation based vection findings appear to be better explained by ecology: Upright postures and oscillating flow (that are the norm during self-motion) improved vection, whereas lying postures and smooth optic flows (which are less common) impaired vection.}, author = {Guterman, P. and Allison, R. S. and Palmisano, S.A. and Zacher, J. E.}, date-added = {2012-05-25 23:31:32 -0400}, date-modified = {2014-09-26 01:49:30 +0000}, doi = {10.3233/VES-2012-0448}, journal = {Journal of Vestibular Research}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {2-3}, pages = {105-116}, title = {Influence of head orientation and viewpoint oscillation on linear vection}, url = {http://percept.eecs.yorku.ca/papers/Guterman%202012.pdf}, volume = {22}, year = {2012}, url-1 = {http://percept.eecs.yorku.ca/papers/Guterman%202012.pdf}, url-2 = {https://doi.org/10.3233/VES-2012-0448}}
@article{Banks:2012kx, abstract = {Stereoscopic displays have become very important for many applications, including operation of remote devices, medical imaging, surgery, scientific visualization, computer-assisted design, and more. But the most significant and exciting development is the incorporation of stereo technology into entertainment: specifically, cinema, television, and video games. It is important in these applications for stereo 3D imagery to create a faithful impression of the 3D structure of the scene being portrayed. It is also important that the viewer is comfortable and does not leave the experience with eye fatigue or a headache, and that the presentation of the stereo images does not create temporal artifacts like flicker or motion judder. Here we review current research on stereo human vision and how it informs us about how best to create and present stereo 3D imagery. The paper is divided into four parts: 1) Getting the geometry right; 2) depth cue interactions in stereo 3D media; 3) focusing and fixating on stereo images; and 4) temporal presentation protocols: Flicker, motion artifacts, and depth distortion.}, author = {Martin S. Banks and Jenny R. Read and Robert S. Allison and Simon J. Watt}, date-added = {2012-04-30 18:57:25 -0400}, date-modified = {2013-07-18 04:25:20 +0000}, doi = {10.5594/j18173}, journal = {SMPTE Motion Imaging (Winner of 2013 SMPTE Journal Certificate of Merit, also appears in SMPTE International Conference on Stereoscopic 3D for Media and Entertainment Conference proceedings)}, keywords = {Stereopsis}, number = {4}, pages = {24-43}, title = {Stereoscopy and the Human Visual System}, volume = {121}, year = {2012}, url-1 = {https://doi.org/10.5594/j18173}}
@article{Sakano:2011kx, abstract = {We examined whether a negative motion aftereffect occurs in the depth direction following adaptation to motion in depth based on changing disparity and/or interocular velocity differences. To dissociate these cues, we used three types of adapters: random-element stereograms that were correlated (1) temporally and binocularly, (2) temporally but not binocularly, and (3) binocularly but not temporally. Only the temporally correlated adapters contained coherent interocular velocity differences while only the binocularly correlated adapters contained coherent changing disparity. A motion aftereffect in depth occurred after adaptation to the temporally correlated stereograms while little or no aftereffect occurred following adaptation to the temporally uncorrelated stereograms. Interestingly, a monocular test pattern also showed a comparable motion aftereffect in a diagonal direction in depth after adaptation to the temporally correlated stereograms. The lack of the aftereffect following adaptation to pure changing disparity was also confirmed using spatially separated random-dot patterns. These results are consistent with the existence of a mechanism sensitive to interocular velocity differences, which is adaptable (at least in part) at binocular stages of motion-in-depth processing. We did not find any evidence for the existence of an ``adaptable'' mechanism specialized to see motion in depth based on changing disparity.}, author = {Sakano, Y. and Allison, R.S. and Howard, I.P.}, date-added = {2011-12-16 18:52:35 +0000}, date-modified = {2019-02-03 09:07:15 -0500}, doi = {10.1167/12.1.11}, journal = {Journal of Vision}, keywords = {Motion in depth, Stereopsis}, number = {1, Article 11}, pages = {1--15}, title = {Motion aftereffect in depth based on binocular information}, volume = {12}, year = {2012}, url-1 = {https://doi.org/10.1167/12.1.11}}
@article{Tsirlin:2011pa, abstract = {We describe a perceptual asymmetry found in stereoscopic perception of overlaid random-dot surfaces. Specifically, the minimum separation in depth needed to perceptually segregate two overlaid surfaces depended on the distribution of dots across the surfaces. With the total dot density fixed, significantly larger inter-plane disparities were required for perceptual segregation of the surfaces when the front surface had fewer dots than the back surface compared to when the back surface was the one with fewer dots. We propose that our results reflect an asymmetry in the signal strength of the front and back surfaces due to the assignment of the spaces between the dots to the back surface by disparity interpolation. This hypothesis was supported by the results of two experiments designed to reduce the imbalance in the neuronal response to the two surfaces. We modeled the psychophysical data with a network of inter-neural connections: excitatory within-disparity and inhibitory across disparity, where the spread of disparity was modulated according to figure-ground assignment. These psychophysical and computational findings suggest that stereoscopic transparency depends on both inter-neural interactions of disparity-tuned cells and higher-level processes governing figure ground segregation.}, author = {Tsirlin, I. and Allison, R.S. and Wilcox, L.M.}, date-added = {2011-11-17 19:32:31 -0500}, date-modified = {2014-09-26 01:22:44 +0000}, doi = {10.1016/j.visres.2011.11.013}, journal = {Vision Research}, keywords = {Stereopsis}, month = {02}, number = {1}, pages = {1-11}, title = {Perceptual asymmetry reveals neural substrates underlying stereoscopic transparency}, url = {http://percept.eecs.yorku.ca/papers/asymmetry.pdf}, volume = {54}, year = {2012}, url-1 = {http://percept.eecs.yorku.ca/papers/asymmetry.pdf}, url-2 = {https://doi.org/10.1016/j.visres.2011.11.013}}
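The figure-ground account summarized in this abstract can be caricatured in a few lines. The sketch below is a schematic reconstruction from the abstract alone, not the authors' implementation: gaps between dots are assigned to the back surface by disparity interpolation, boosting the back-surface signal, and the two pooled signals then inhibit one another across disparity, so a sparse front surface ends up with far less signal than an equally sparse back surface.

# Toy reconstruction of the proposed mechanism (not the authors' model).
# Densities are fractions of image area covered by each surface's dots.
def surface_signals(front_density, back_density, interp=0.3, w_inh=0.3):
    gap = 1.0 - front_density - back_density       # empty space between dots
    back = back_density + interp * gap             # interpolation favours the back plane
    front = front_density
    return (max(front - w_inh * back, 0.0),        # across-disparity inhibition
            max(back - w_inh * front, 0.0))

# A front-sparse split leaves almost no front-surface signal, while the
# reverse split stays more balanced, mirroring the reported asymmetry.
print(surface_signals(0.1, 0.5))   # sparse front surface
print(surface_signals(0.5, 0.1))   # sparse back surface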
@article{Allison:2012fk, abstract = {The hypothesis that artefacts related to chromatic aberration from eyeglasses may be more objectionable in laser projectors compared to conventional digital projectors was investigated. Untrained observers viewed movie clips in a theater and made image quality ratings. The same four clips were presented on both a standard Xenon display and a prototype laser projector in separate blocks. There was no evidence that observers noticed color break-up artefacts using either mode of presentation.}, author = {Allison, R.S. and Irving, E.L. and Babu, R. and Lillakas, L. and Guthrie, S. and Wilcox, L.M.}, date-added = {2011-09-04 03:06:31 -0400}, date-modified = {2014-09-26 01:52:45 +0000}, doi = {10.1109/JDT.2011.2170957}, journal = {IEEE Journal of Display Technology}, keywords = {Misc.}, number = {4}, pages = {186 - 193}, title = {Visibility of Color Breakup Phenomena in Displays based on Narrowband Spectral Sources}, url = {http://percept.eecs.yorku.ca/papers/JDT2170957.pdf}, volume = {8}, year = {2012}, url-1 = {http://percept.eecs.yorku.ca/papers/JDT2170957.pdf}, url-2 = {https://doi.org/10.1109/JDT.2011.2170957}}
@article{Tsirlin:2012ys, abstract = {Stereoscopic displays must present separate images to the viewer's left and right eyes. Crosstalk is the unwanted contamination of one eye's image from the image of the other eye. It has been shown to cause distortions, reduce image quality and visual comfort and increase perceived workload when performing visual tasks. Crosstalk also affects one's ability to perceive stereoscopic depth although little consideration has been given to the perception of depth magnitude in the presence of crosstalk. In this paper we extend a previous study (Tsirlin, Allison \& Wilcox, 2010, submitted) on the perception of depth magnitude in stereoscopic occluding and non-occluding surfaces to the special case of crosstalk in thin structures. Crosstalk in thin structures differs qualitatively from that in larger objects due to the separation of the ghost and real images and thus theoretically could have distinct perceptual consequences. To address this question we used a psychophysical paradigm, where observers estimated the perceived depth difference between two thin vertical bars using a measurement scale. Our data show that crosstalk degrades perceived depth. As crosstalk levels increased the magnitude of perceived depth decreased, especially for stimuli with larger relative disparities. In contrast to the effect of crosstalk on depth magnitude in larger objects, in thin structures, a significant detrimental effect was found at all disparities. Our findings, when considered with the other perceptual consequences of crosstalk, suggest that its presence in S3D media even in modest amounts will reduce observers' satisfaction.}, author = {Tsirlin, I. and Allison, R.S. and Wilcox, L.M.}, date-added = {2011-08-10 13:41:50 -0400}, date-modified = {2018-11-25 14:40:06 -0500}, doi = {10.1117/1.JEI.21.1.011003}, journal = {Journal of Electronic Imaging (an earlier version also published in Electronic Imaging 2012: Stereoscopic Displays and Applications)}, keywords = {Stereopsis}, pages = {011003.1-8}, title = {The effect of crosstalk on depth magnitude in thin structures}, url = {http://percept.eecs.yorku.ca/inna_jei.pdf}, volume = {21}, year = {2012}, url-1 = {http://percept.eecs.yorku.ca/inna_jei.pdf}, url-2 = {https://doi.org/10.1117/1.JEI.21.1.011003}}
@inbook{Allison:2012fj, address = {New York}, author = {Allison, R.S. and Howard, I.P.}, booktitle = {Perceiving in Depth, Volume 2: {S}tereoscopic Vision}, date-added = {2012-07-02 19:43:53 -0400}, date-modified = {2012-07-02 20:46:45 -0400}, doi = {10.1093/acprof:oso/9780199764150.001.0001}, editor = {I. Howard and B.J. Rogers}, keywords = {Depth perception}, pages = {40-50}, publisher = {Oxford University Press}, title = {Models of Disparity Detectors}, year = {2012}, url-1 = {https://doi.org/10.1093/acprof:oso/9780199764150.001.0001}}
@incollection{Harris:2012uq, annote = {New Orleans, October 13--17, 2012}, author = {Harris, L. R. and Herpers, R. and Jenkin, M. and Allison, R. S. and Jenkin, H. and Kapralos, B. and Scherfgen, D. and Felsner, S.}, booktitle = {Society for Neuroscience Abstracts}, date-added = {2013-01-22 01:45:23 +0000}, date-modified = {2013-01-22 01:46:14 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {672.14}, publisher = {Society for Neuroscience}, title = {Optic flow and self-motion perception: The contribution of different parts of the field}, url-1 = {http://www.yorku.ca/harris/pubs/sfn_2012_bonn_rhein_sieg.pdf}, year = {2012}}
@incollection{tsirlin_effect_2012, abstract = {Crosstalk in stereoscopic displays is defined as the leakage of one eye's image into the image of the other eye. All popular commercial stereoscopic viewing systems, including the ones used in movie theaters, suffer from crosstalk to some extent. It has been shown that crosstalk causes image distortions and reduces image quality. Moreover, it decreases visual comfort and affects one's ability to discriminate object shape and judge the relative depth of two objects. These results have potentially important implications for the quality and the accuracy of depth percepts in 3D display systems. To assess this hypothesis directly, we have explored the effect of crosstalk on the perceived magnitude of depth in a variety of stereoscopic stimuli. We found that with simple synthetic images increasing crosstalk beyond four percent resulted in a significant decrease in the magnitude of perceived depth, especially for larger disparities. This degradation was largely independent of the spatial separation of the ghost image. Further, we found qualitatively and quantitatively similar detrimental effects of crosstalk on perceived depth in complex images of natural scenes. The consistency of the negative impact of crosstalk, regardless of image complexity, suggests that it is not ameliorated by the presence of pictorial depth cues. We have recommended that display manufacturers keep crosstalk levels below the critical value of four percent to achieve optimal depth quality. Meeting abstract presented at {OSA} Fall Vision 2012}, author = {Tsirlin, Inna and Wilcox, Laurie M. and Allison, Robert S.}, booktitle = {OSA Fall Vision 2012, Journal of Vision}, date-added = {2012-12-28 12:53:19 +0000}, date-modified = {2012-12-28 13:03:08 +0000}, doi = {10.1167/12.14.4}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Stereopsis}, language = {en}, month = 12, note = {Sept 2012 Rochester, NY}, number = {14}, pages = {4--4}, title = {The effect of crosstalk on perceived depth in {3D} displays}, url = {http://www.journalofvision.org/content/12/14/4}, urldate = {2012-12-28}, volume = {12}, year = {2012}, url-1 = {http://www.journalofvision.org/content/12/14/4}, url-2 = {https://doi.org/10.1167/12.14.4}}
@incollection{guterman_postural_2012, abstract = {Adding viewpoint oscillation to displays increases the likelihood of visually induced self-motion (vection), even though sensory conflict theories predict that it should generate significant and sustained visual-vestibular conflict. This effect has been shown in upright observers, for which the simulated self-motion and oscillation were congruent with or orthogonal to gravity. Here we examined whether this oscillation advantage for vection depends on the orientation of the body with respect to gravity. Observers in upright (seated and standing) and lying (supine, prone, and left side down) postures viewed displays of radial optic flow simulating forward/backward self-motion, with or without horizontal or vertical viewpoint oscillation. Vection magnitude (compared to a reference stimulus), onset, duration, and vection dropouts, were compared among postures. Viewpoint oscillation enhanced vection for all of the body postures tested. Vection also tended to be stronger in upright than in lying postures. Changing body orientation with respect to gravity was expected to alter the degree/saliency of the sensory conflict, and may explain the posture-based differences in vection magnitude. However, this does not explain why the oscillation advantage for vection persisted for all postures. Given that the upright posture and oscillating flow (the norm during real self-motion) improved vection, and lying postures and smooth flow (which are atypical in our experience of self-motion) impaired vection, we conclude that postural and oscillation based vection findings are better explained by ecology. Meeting abstract presented at {VSS} 2012}, author = {Guterman, Pearl S. and Allison, Robert S. and Palmisano, Stephen and Zacher, James E.}, booktitle = {Journal of Vision (VSS Abstract)}, date-added = {2012-08-11 12:48:36 +0000}, date-modified = {2012-08-11 12:48:36 +0000}, doi = {10.1167/12.9.576}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = 08, number = {9}, pages = {576--576}, title = {Postural and viewpoint oscillation effects on the perception of self-motion}, url = {http://www.journalofvision.org/content/12/9/576}, volume = {12}, year = {2012}, url-1 = {http://www.journalofvision.org/content/12/9/576}, url-2 = {https://doi.org/10.1167/12.9.576}}
@incollection{lugtigheid_depth_2012, abstract = {It is well-established that stereoscopic depth is obtained over a large range of retinal disparities, including those that produce diplopia (double images). Under normal viewing conditions, observers make vergence eye movements to minimize large disparities, and it has been suggested that observers judge depth sign for diplopic stimuli by monitoring the vergence signal. Here we ask if vergence eye movements are required to judge depth order (disparity sign) of diplopic stimuli. We created an open-loop stimulus by presenting stereoscopic afterimages, for which eye movements cannot provide feedback about depth sign or magnitude. We produced afterimages of line stereograms consisting of precision-milled slits in aluminum plates that were back-illuminated by a photographic flash. Each half-image consisted of two thin (1x10mm) vertical slits, positioned above and below a small (1mm) fixation {LED.} The half-images were viewed through a modified mirror stereoscope, so that the fused image formed two narrow bars in the mid-sagittal plane. On each trial, the upper and lower bars were displaced in depth by one of five equal and opposite disparities (two in the range of fusion, one zero and two that were diplopic). After each presentation, observers (n=15) judged which bar was closer to them. Observers reliably judged the sign of disparity for both diplopic and fused images. We conclude that judgments of disparity sign for diplopic stimuli do not depend on extraretinal information, but are recovered directly from the retinal disparity signal. Meeting abstract presented at {VSS} 2012}, author = {Lugtigheid, Arthur and Wilcox, Laurie and Allison, Robert S. and Howard, Ian}, booktitle = {Journal of Vision (VSS Abstract)}, date-added = {2012-08-11 12:48:36 +0000}, date-modified = {2013-01-22 01:43:55 +0000}, doi = {10.1167/12.9.451}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Stereopsis}, month = 08, number = {9}, pages = {451--451}, title = {Depth from diplopic stimuli without vergence eye movements}, url = {http://www.journalofvision.org/content/12/9/451}, volume = {12}, year = {2012}, url-1 = {http://www.journalofvision.org/content/12/9/451}, url-2 = {https://doi.org/10.1167/12.9.451}}
@incollection{carey_allocation_2012, abstract = {It has been shown that disparity can be used as a token for visual search (possibly pre-attentively). However, there has been no systematic investigation of the distribution of attentional resources across the disparity dimension. Here we evaluated whether position in depth, relative to the screen plane, influences attentional allocation. We conducted two experiments using the same visual search task but with different stimuli. In the first experiment, stimuli consisted of four simple geometric shapes and in the second experiment the stimuli consisted of four orientated lines enclosed by a circle. In both cases, the stimuli were arranged in an annulus about a central fixation marker. On each trial, observers indicated whether the target was present or not within the annular array. The distractor number was varied randomly on each trial (2, 4, 6, or 8) and the target was present on half of the trials. On all trials, one element had a disparity offset by 10 arcmin relative to the others. On half of target present trials the target was in the disparate location; on the remainder it was presented at the distractor disparity. Trials were further subdivided such that on equal numbers of trials the disparate element was on or off the plane of the screen. We measured search time, analysing only trials on which observers responded correctly. Both experiments showed that when the target was the disparate item, reaction time was significantly faster when the target was off the screen plane compared to at the screen plane. This was true for a range of crossed and uncrossed disparities. We conclude that there exists a selective attentional bias for stimuli lying off the screen plane. These data are the first evidence of a disparity-selective attentional bias that is not mediated by relative disparity. Meeting abstract presented at {VSS} 2012}, author = {Carey, Andrea and Wilcox, Laurie and Allison, Robert}, booktitle = {Journal of Vision (VSS Abstract)}, date-added = {2012-08-11 12:48:36 +0000}, date-modified = {2012-08-11 12:48:36 +0000}, doi = {10.1167/12.9.216}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Stereopsis}, month = 08, number = {9}, pages = {216--216}, title = {On the allocation of attention in stereoscopic displays}, url = {http://www.journalofvision.org/content/12/9/216}, volume = {12}, year = {2012}, url-1 = {http://www.journalofvision.org/content/12/9/216}, url-2 = {https://doi.org/10.1167/12.9.216}}
@incollection{tsirlin_is_2012, abstract = {Depth from binocular disparity relies on finding matching points in the images of the two eyes. However, not all points have a corresponding match since some regions are visible to one eye only. These regions, known as monocular occlusions, play an important role in stereoscopic depth perception supporting both qualitative and quantitative depth percepts. However, it is debated whether these percepts could be signaled by the activity of disparity detectors or require cells specifically tuned to detect monocular occlusions. The goal of the present work is to assess the degree to which model disparity detectors are able to compute the direction and the amount of depth perceived from monocular occlusions. It has been argued that disparity-selective neurons in V1 essentially perform a cross-correlation on the images of the two eyes. Consequently, we have applied a windowed cross-correlation algorithm to several monocular occlusion stimuli presented in the literature (see also Harris \& Smith, {VSS} 2010). We computed depth maps and correlation profiles and measured the reliability and the strength of the disparity signal generated by cross-correlation. Our results show that although the algorithm is able to predict perceived depth in monocularly occluded regions for some stimuli, it fails to do so for others. Moreover, for virtually all monocularly occluded regions the reliability and the signal strength of depth estimates are low in comparison to estimates made in binocular regions. We also find that depth estimates for monocular areas are highly sensitive to the window size and the range of disparities used to compute the cross-correlation. We conclude that disparity detectors, at least those that perform cross-correlation, cannot account for all instances of depth perceived from monocular occlusions. A more complex mechanism, potentially involving monocular occlusion detectors, is required to account for depth in these stimuli. Meeting abstract presented at {VSS} 2012}, author = {Tsirlin, Inna and Allison, Robert and Wilcox, Laurie}, booktitle = {Journal of Vision (VSS Abstract)}, date-added = {2012-08-11 12:48:36 +0000}, date-modified = {2012-08-11 12:48:36 +0000}, doi = {10.1167/12.9.215}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Stereopsis}, month = 08, number = {9}, pages = {215--215}, title = {Is depth in monocular regions processed by disparity detectors? A computational analysis.}, url = {http://www.journalofvision.org/content/12/9/215}, volume = {12}, year = {2012}, url-1 = {http://www.journalofvision.org/content/12/9/215}, url-2 = {https://doi.org/10.1167/12.9.215}}
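A windowed cross-correlation disparity estimator of the kind analyzed in this abstract is easy to prototype. The sketch below is a generic implementation, not the authors' code: for each window in the left image it searches horizontally shifted windows in the right image, takes the best-correlating shift as the disparity estimate, and keeps the peak correlation as a reliability measure, which stays low in monocularly occluded regions where no shift yields a good match.

import numpy as np

# Generic windowed cross-correlation disparity estimator (illustrative sketch,
# not the authors' code). left and right are same-sized grayscale arrays.
def disparity_map(left, right, win=7, max_disp=16):
    h, w = left.shape
    half = win // 2
    disp = np.zeros((h, w))
    conf = np.zeros((h, w))
    for y in range(half, h - half):
        for x in range(half + max_disp, w - half):
            patch = left[y-half:y+half+1, x-half:x+half+1].ravel()
            patch = patch - patch.mean()
            best_r, best_d = -1.0, 0
            for d in range(max_disp + 1):
                cand = right[y-half:y+half+1, x-d-half:x-d+half+1].ravel()
                cand = cand - cand.mean()
                denom = np.linalg.norm(patch) * np.linalg.norm(cand)
                r = np.dot(patch, cand) / denom if denom > 0 else 0.0
                if r > best_r:  # keep the best-correlating horizontal shift
                    best_r, best_d = r, d
            disp[y, x] = best_d
            conf[y, x] = best_r  # low peak correlation flags unreliable matches,
                                 # e.g. in monocularly occluded regions
    return disp, conf

As the abstract notes, both the window size (win) and the disparity search range (max_disp) strongly affect the estimates this kind of algorithm produces in monocular regions.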
@incollection{ash_vection_2012, abstract = {The research on vection during treadmill locomotion appears contradictory. For example, Onimaru (2010) reported that walking forwards on a treadmill impaired the vection induced by expanding flow, whereas Seno et al (2011) appeared to find a vection enhancement in these conditions. These previous studies both examined smooth self-motion displays, despite the fact that jittering displays have consistently been shown to improve vection in seated observers. We simulated constant velocity expanding and contracting optic flow displays, in which subjects' physical movements were either updated as additional display jitter (synchronised head-display motion) or not updated into the self-motion display. We also varied the display/treadmill forward speed -- these could be simulated at either 4 km/hr or 5 km/hr. Subjects viewed displays in real-time while walking on a treadmill or on the spot and as passive playbacks while stationary. Subjects rated their perceived strength of vection in depth using a joystick (compared to a standard reference stimulus). We found vection impairments for both expanding and contracting optic flow displays and similar impairments when subjects actively walked on the spot. Despite finding a general vection impairment for active walking, faster display/treadmill forward speeds and synchronised head-display jitter improved vection. It was concluded that vection impairments while walking appear to be independent of the display's simulated direction and the nature of one's walking activity. Meeting abstract presented at {VSS} 2012}, author = {Ash, April and Palmisano, Stephen and Allison, Robert}, booktitle = {Journal of Vision (VSS Abstract)}, date-added = {2012-08-11 12:48:36 +0000}, date-modified = {2012-08-11 12:48:36 +0000}, doi = {10.1167/12.9.181}, issn = {1534-7362}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = 08, number = {9}, pages = {181--181}, title = {Vection in depth during treadmill locomotion}, url = {http://www.journalofvision.org/content/12/9/181}, volume = {12}, year = {2012}, url-1 = {http://www.journalofvision.org/content/12/9/181}, url-2 = {https://doi.org/10.1167/12.9.181}}
@inproceedings{Chen:2012uq, abstract = {The geometrical calibration of a high-definition camera rig is an important step for 3D film making and computer vision applications. Due to the large amount of image data in high-definition, maintaining execution speeds appropriate for on-set, on-line adjustment procedures is one of the biggest challenges for machine vision based calibration methods. Our aims are to provide a low-cost, fast and accurate system to calibrate both the intrinsic and extrinsic parameters of a stereo camera rig. We first propose a novel calibration target that we call marker chessboard to speed up the corner detection. Then we develop an automatic key frame selection algorithm to optimize frames used in calibration. We also propose a bundle adjustment method to overcome the geometrical inaccuracy of the chessboard. Finally we introduce an online stereo camera calibration system based on the above improvements.}, address = {Providence, Rhode Island}, author = {Chen, J. and Benzeroual, K. and Allison, R. S.}, booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition, {3DC}ine Workshop ({CVPRW})}, date-added = {2012-04-30 18:54:41 -0400}, date-modified = {2014-09-26 02:24:14 +0000}, doi = {10.1109/CVPRW.2012.6238905}, keywords = {Stereopsis}, month = {06}, pages = {29-36}, title = {Calibration for High-Definition Camera Rigs with Marker Chessboard}, url = {http://percept.eecs.yorku.ca/papers/marker%20chessboard.pdf}, year = {2012}, url-1 = {http://percept.eecs.yorku.ca/papers/marker%20chessboard.pdf}, url-2 = {https://doi.org/10.1109/CVPRW.2012.6238905}}
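As a baseline for comparison with the system in this paper, a stereo rig can be calibrated with OpenCV's standard chessboard pipeline. The sketch below uses a plain chessboard and omits the paper's marker chessboard, key-frame selection, and chessboard-aware bundle adjustment; the function calls are standard OpenCV, while the board size and file-name patterns are placeholders.

import cv2
import numpy as np
import glob

# Baseline stereo-rig calibration with a plain chessboard (the paper's marker
# chessboard and speed optimizations are not reproduced here).
pattern = (9, 6)  # inner corners per row/column of the hypothetical board
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

obj_pts, left_pts, right_pts, image_size = [], [], [], None
for lf, rf in zip(sorted(glob.glob("left_*.png")), sorted(glob.glob("right_*.png"))):
    gl = cv2.imread(lf, cv2.IMREAD_GRAYSCALE)
    gr = cv2.imread(rf, cv2.IMREAD_GRAYSCALE)
    okl, cl = cv2.findChessboardCorners(gl, pattern)
    okr, cr = cv2.findChessboardCorners(gr, pattern)
    if okl and okr:  # keep only frames where both views detect the board
        obj_pts.append(objp)
        left_pts.append(cl)
        right_pts.append(cr)
        image_size = gl.shape[::-1]

# Calibrate each camera's intrinsics, then solve for the rig extrinsics (R, T).
_, K1, d1, _, _ = cv2.calibrateCamera(obj_pts, left_pts, image_size, None, None)
_, K2, d2, _, _ = cv2.calibrateCamera(obj_pts, right_pts, image_size, None, None)
_, K1, d1, K2, d2, R, T, E, F = cv2.stereoCalibrate(
    obj_pts, left_pts, right_pts, K1, d1, K2, d2, image_size,
    flags=cv2.CALIB_FIX_INTRINSIC)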
@inproceedings{Benzeroual:2012kx, abstract = {Over the last decade, advances in technology have made stereoscopic 3D (S3D) displays widely available with an ever-expanding variety of technologies, dimensions, resolution, optimal viewing angle and image quality. Of these, one of the most variable and unpredictable factors influencing the observer's S3D experience is the display size, which ranges from S3D mobile devices to large-format 3D movie theatres. This variety poses a challenge to 3D content makers who wish to preserve the three dimensional artistic context and avoid distortions and artefacts related to scaling. This paper will review the primary human factors issues related to S3D image scaling and the techniques and algorithms used to scale content.}, address = {Providence, Rhode Island}, author = {Benzeroual, K. and Allison, R. S. and Wilcox, L.M.}, booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition, {3DC}ine Workshop ({CVPRW})}, date-added = {2012-04-30 18:54:41 -0400}, date-modified = {2014-09-26 02:24:30 +0000}, doi = {10.1109/CVPRW.2012.6238907}, keywords = {Stereopsis}, month = {06}, pages = {45-52}, title = {3D Display size matters: Compensating for the perceptual effects of {S3D} display scaling}, year = {2012}, url-1 = {https://doi.org/10.1109/CVPRW.2012.6238907}}
@inproceedings{Laldin:2012vn, abstract = {In stereoscopic vision, there is a non-linear mapping between real space and disparity. In a stereoscopic 3D scene, this non-linear mapping could produce distortions of space when camera geometry differs from natural stereoscopic geometry. When the viewing distance and zero screen parallax setting are held constant and interaxial separation (IA) is varied, there is an asymmetric distortion in the mapping of stereoscopic to real space. If an object traverses this space at constant velocity, one might anticipate distortion of the perceived trajectory. This prediction is based on the premise that when the object traverses compressed space, it should appear to move slower than when it passes through expanded space. In addition, this effect should depend on the saliency of the depth information in the scene. To determine if the predicted distortions are in fact perceived, we assessed observers' percepts of acceleration and deceleration using an animation of a ball moving in depth through a simulated environment, viewed stereoscopically. The method of limits was used to measure transition points between perceived acceleration and deceleration as a function of IA and context (textured vs. non-textured background). Eleven observers with normal binocular vision were tested using four IAs (35, 57.4, 65.7, and 68.21mm). The range of acceleration / deceleration rates presented was selected to bracket the predicted values based on the IA and the viewing geometry. Two environments were used to provide different levels of monocular depth cues, specifically an untextured and a tiled ground plane. For each environment and IA combination, four measures were made of the transition points between perceived acceleration and deceleration. For two of these measures, the series of clips began with an obviously accelerating object and progressed to an obviously decelerating object. The participants' task was to identify the point at which the percept changed from accelerating to decelerating. In the other two measures, the converse procedure was used to identify the deceleration to acceleration transition. Based on binocular geometry, we predicted that the transition points would shift toward deceleration for small IA and towards acceleration for large IA. This effect should be modulated by monocular depth cues. However, the average transition values were not influenced by IA or the simulated environment. These data suggest that observers are able to discount distortions of stereoscopic space in interpreting the trajectory of objects moving through simple environments. It remains to be seen if velocity constancy will be similarly maintained in more complex scenes or scenes containing multiple moving objects. These results have important implications for the rendering or capture of effective stereoscopic 3D content.}, author = {Laldin, S. and Wilcox, L. and Hylton, C. and Allison, R.S.}, booktitle = {Electronic Imaging: Stereoscopic Displays and Applications}, date-added = {2011-08-10 13:35:10 -0400}, date-modified = {2014-09-26 02:09:04 +0000}, doi = {10.1117/12.910577}, keywords = {Stereopsis}, month = {01}, pages = {82880N1-82880N11}, publisher = {SPIE-Int Soc Optical Engineering}, title = {Motion in depth constancy in stereoscopic displays}, url = {http://percept.eecs.yorku.ca/papers/82880N_1.pdf}, volume = {8288}, year = {2012}, url-1 = {http://percept.eecs.yorku.ca/papers/82880N_1.pdf}, url-2 = {https://doi.org/10.1117/12.910577}}
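The distortion prediction in this abstract follows from standard stereoscopic display geometry. As a worked sketch (textbook geometry, not code from the paper), the distance of a fused point for a viewer with interocular separation e at viewing distance V from the screen, given screen parallax p, is V*e/(e - p); equal steps of parallax therefore map onto unequal steps of perceived distance, which is the asymmetric compression and expansion of space the experiment probes by varying IA.

# Worked example of standard stereoscopic viewing geometry (illustrative,
# not the paper's code). Units are metres; e and V are placeholder values.
def fused_distance(p, e=0.065, V=0.8):
    return V * e / (e - p)  # uncrossed parallax p > 0 puts the point behind the screen

# Equal parallax steps give unequal perceived-distance steps: space is
# compressed in front of the screen and expanded behind it.
for p in (-0.02, -0.01, 0.0, 0.01, 0.02):
    print(p, round(fused_distance(p), 3))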
@inproceedings{Tsirlin:2012kx, abstract = {Crosstalk remains an important determinant of S3D image quality. Defined as the leakage of one eye's image into the image of the other eye, crosstalk affects all commercially available stereoscopic viewing systems. It is well established that crosstalk decreases perceived image quality and causes image distortion (Seuntiens et al. 2005, Wilcox & Stewart, 2002). Moreover, visual comfort decreases and perceived workload increases with increasing crosstalk (Kooi and Toet, 2004; Lambooij, 2010; Pala et al. 2007). In a series of experiments we have shown that crosstalk also affects perceived depth magnitude (Tsirlin et al. 2011a; Tsirlin et al. 2011b). In our previous experiments we used two white bars on a black background, and measured the perceived depth between the bars as a function of disparity and degree of crosstalk. The data showed that as crosstalk increased, perceived depth decreased. This effect was intensified for larger disparities. We found the effect was present regardless of whether the ghost image was spatially separated from, or overlapped with, the original image. The experiments described here extend our previous work to complex images of natural scenes. We controlled crosstalk levels by simulating them in images presented on a zero-crosstalk mirror stereoscope display. The stimulus was a color image of our laboratory that showed a cluttered scene composed of furniture and objects. The observers were asked to estimate the amount of stereoscopic depth between pairs of objects in the scene. We used two different estimation methods - a virtual measurement scale and a disparity probe. Data show that, as was the case with simple line stimuli, depth in this natural scene was dramatically affected by crosstalk. As crosstalk increased, perceived depth decreased, an effect that grew with increasing disparity. Interestingly, observers overestimated the depth in displays that contained no crosstalk. We propose that this overestimation is the result of the presence of pictorial cues to depth (perspective, texture gradients etc.) and familiarity with the real size of the objects depicted in the image. This hypothesis was confirmed by a control experiment where observers estimated depth in the same natural scene presented in 2D instead of S3D. Although there was no stereoscopic depth in this case, observers still perceived some depth between object pairs. Some observers spontaneously reported nausea and headaches after performing the task in S3D, which confirms previous findings that crosstalk causes discomfort in viewers (Kooi and Toet, 2004). Taken together, these results show that our previous findings generalize to natural scenes, showing that crosstalk affects perceived depth magnitude even in the presence of pictorial depth cues. Our data underscore the fact that crosstalk is a serious challenge to the quality of S3D media and has to be carefully addressed by display manufacturers.}, author = {Tsirlin, I. and Allison, R.S. and Wilcox, L.M.}, booktitle = {Electronic Imaging: Stereoscopic Displays and Applications}, date-added = {2011-08-10 13:32:19 -0400}, date-modified = {2016-01-03 03:23:15 +0000}, doi = {10.1117/12.906751}, keywords = {Stereopsis}, pages = {82880W, 1-9}, publisher = {SPIE-Int Soc Optical Engineering}, title = {Crosstalk reduces the amount of depth seen in 3D images of natural scenes}, url = {http://percept.eecs.yorku.ca/papers/82880W_1.pdf}, volume = {8288}, year = {2012}, url-1 = {http://percept.eecs.yorku.ca/papers/82880W_1.pdf}, url-2 = {https://doi.org/10.1117/12.906751}}
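Simulated crosstalk of the kind used in these experiments is commonly modelled as linear leakage between the two eyes' images. The sketch below implements that generic additive-leakage model (an assumption; the paper does not publish code): a fraction c of each eye's image is mixed into the other before the pair is shown on a nominally crosstalk-free display.

import numpy as np

# Generic additive-leakage model of crosstalk (illustrative assumption):
# each displayed image is contaminated by a fraction c of the other eye's image.
def add_crosstalk(left, right, c=0.04):
    left = left.astype(float)
    right = right.astype(float)
    ghost_left = (1.0 - c) * left + c * right   # right eye's image leaks into the left
    ghost_right = (1.0 - c) * right + c * left  # and vice versa
    return np.clip(ghost_left, 0, 255), np.clip(ghost_right, 0, 255)

# c = 0.04 corresponds to the four percent level that the companion OSA
# abstract reports as the point beyond which perceived depth degrades.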
@techreport{Allison:2012CSA, author = {Allison, R.S.}, date-added = {2013-04-05 16:53:19 +0000}, date-modified = {2013-10-06 21:10:52 +0000}, institution = {Canadian Space Agency Space Exploration Projects}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {Contract no. 9F007-091472}, title = {Visual Perception of Smooth and Perturbed Self-Motion in Microgravity, Final Report}, year = {2012}}
@techreport{Stefanie:2012uq, author = {Ruel, S. and Allison, R.S.}, date-added = {2013-01-01 16:43:51 +0000}, date-modified = {2013-01-01 16:46:24 +0000}, institution = {Canadian Space Agency Space Exploration Projects}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {CSA-SEEJITTER-RD-0001}, title = {SeeJitter Experiment Requirements Document (ERD)}, year = {2012}}
@article{tsirlin_disparity_2011, abstract = {Monocular occlusions have been shown to play an important role in stereopsis. Among other contributions to binocular depth perception, monocular occlusions can create percepts of illusory occluding surfaces. It has been argued that the precise location in depth of these illusory occluders is based on the constraints imposed by occlusion geometry. Tsirlin et al. (2010) proposed that when these constraints are weak, the depth of the illusory occluder can be biased by a neighboring disparity-defined feature. In the present work we test this hypothesis using a variety of stimuli. We show that when monocular occlusions provide only partial constraints on the magnitude of depth of the illusory occluders, the perceived depth of the occluders can be biased by disparity-defined features in the direction unrestricted by the occlusion geometry. Using this disparity bias phenomenon we also show that in illusory occluder stimuli where disparity information is present, but weak, most observers rely on disparity while some use occlusion information instead to specify the depth of the illusory occluder. Taken together our experiments demonstrate that in binocular depth perception disparity and monocular occlusion cues interact in complex ways to resolve perceptual ambiguity.}, author = {Tsirlin, Inna and Wilcox, Laurie M. and Allison, Robert S.}, date-added = {2011-08-01 19:29:50 -0400}, date-modified = {2014-09-26 01:54:13 +0000}, doi = {10.1016/j.visres.2011.05.012}, issn = {0042-6989}, journal = {Vision Research}, keywords = {Stereopsis}, month = 07, number = {14}, pages = {1699--1711}, title = {Disparity biasing in depth from monocular occlusions}, url = {http://percept.eecs.yorku.ca/papers/disparity%20bias.pdf}, volume = {51}, year = {2011}, url-1 = {http://percept.eecs.yorku.ca/papers/disparity%20bias.pdf}, url-2 = {https://doi.org/10.1016/j.visres.2011.05.012}}
@article{Howard:2011qy, abstract = {Before methods for drawing accurately in perspective were developed in the 15th century, many artists drew with divergent perspective. We found that many university students also draw with divergent perspective rather than with the correct convergent perspective. These experiments were designed to reveal why people tend to draw with divergent perspective. University students drew a cube and isolated edges and surfaces of a cube. Their drawings were very inaccurate. About half the students drew with divergent perspective like artists before the 15th century. Students selected a cube from a set of tapered boxes with great accuracy and were reasonably accurate in selecting the correct drawing of a cube from a set of tapered drawings. Each subject's drawing was much worse than the drawing selected as accurate. An analysis of errors in drawings of a cube and of isolated edges and surfaces of a cube revealed several factors that predispose people to draw in divergent perspective. The way these factors intrude depends on the order in which the edges of the cube are drawn.}, author = {Howard, I.P. and Allison, R.S.}, date-added = {2011-05-22 22:06:30 -0400}, date-modified = {2014-09-26 01:56:17 +0000}, doi = {10.1068/p6876}, journal = {Perception}, keywords = {Depth perception}, number = {9}, pages = {1017-1033}, title = {Drawing with divergent perspective, ancient and modern}, url = {http://percept.eecs.yorku.ca/papers/drawing%20in%20perspective.pdf}, volume = {40}, year = {2011}, url-1 = {http://percept.eecs.yorku.ca/papers/drawing%20in%20perspective.pdf}, url-2 = {https://doi.org/10.1068/p6876}}
@article{Palmisano:2011lr, abstract = {Sensory conflict has been used to explain the way we perceive and control our self-motion, as well as the aetiology of motion sickness. However, recent research on simulated viewpoint jitter provides a strong challenge to one core prediction of these theories --- that increasing sensory conflict should always impair visually induced illusions of self-motion (known as vection). These studies show that jittering self-motion displays (thought to generate significant and sustained visual-vestibular conflict) actually induce superior vection to comparable non-jittering displays (thought to generate only minimal/transient sensory conflict). Here we review viewpoint jitter effects on vection, postural sway, eye-movements and motion sickness, and relate them to recent behavioural and neurophysiological findings. It is shown that jitter research provides important insights into the role that sensory interaction plays in self-motion perception.}, author = {Palmisano, S.A. and Allison, R.S. and Kim, J. and Bonato, F.}, date-added = {2011-05-22 21:11:25 -0400}, date-modified = {2014-09-26 01:58:53 +0000}, doi = {10.1163/187847511X570817}, journal = {Seeing and Perceiving}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {173-200}, title = {Visual Jitter Shakes Conflict Accounts of Self-motion Perception}, url = {http://percept.eecs.yorku.ca/papers/jitter%20review.pdf}, volume = {24}, year = {2011}, url-1 = {http://percept.eecs.yorku.ca/papers/jitter%20review.pdf}, url-2 = {https://doi.org/10.1163/187847511X570817}}
@article{Tsirlin:2011mf, abstract = {Crosstalk in stereoscopic displays is defined as the leakage of one eye's image into the image of the other eye. All popular commercial stereoscopic systems suffer from crosstalk to some extent. Studies show that crosstalk causes distortions, reduces image quality and visual comfort, and increases perceived workload. Moreover, there is evidence that crosstalk affects depth perception from disparity. In the present paper we present two experiments. The first addresses the effect of crosstalk on the perceived magnitude of depth from disparity. The second examines the effect of crosstalk on the magnitude of depth perceived from monocular occlusions. Our data show that crosstalk has a detrimental effect on depth perceived from both cues, but it has a stronger effect on depth from monocular occlusions. Our findings taken together with previous results suggest that crosstalk, even in modest amounts, noticeably degrades the quality of stereoscopic images.}, author = {Tsirlin, I. and Wilcox, L.M. and Allison, R.S.}, date-added = {2011-05-09 12:26:50 -0400}, date-modified = {2012-07-02 13:42:36 -0400}, doi = {10.1109/TBC.2011.2105630}, issn = {0018-9316}, journal = {IEEE Transactions on Broadcasting}, keywords = {Stereopsis}, number = {2}, pages = {445-453}, title = {The effect of crosstalk on the perceived depth from disparity and monocular occlusions}, volume = {57}, year = {2011}, url-1 = {https://doi.org/10.1109/TBC.2011.2105630}}
@inbook{Allison:2010gc, abstract = {This chapter is a review of stereoscopic processes involved in the perception of motion in depth. We will first discuss mechanisms that could be used to process changing disparity signals to motion in depth. We will then review the evidence, some of which has not been published previously, concerning which of these mechanisms is used by the visual system.}, address = {Cambridge UK}, author = {Allison, R.S. and Howard, I.P.}, booktitle = {Vision in 3{D} environments}, date-added = {2011-05-06 11:00:25 -0400}, date-modified = {2014-09-26 02:39:36 +0000}, editor = {L. Harris and M. Jenkin}, keywords = {Motion in depth}, pages = {163-186}, publisher = {Cambridge University Press}, title = {Stereoscopic Motion in Depth}, url = {http://percept.eecs.yorku.ca/papers/cvr%20motion%20in%20depth%20chapter%20submit.pdf}, year = {2011}, url-1 = {http://percept.eecs.yorku.ca/papers/cvr%20motion%20in%20depth%20chapter%20submit.pdf}}
@incollection{Govan:2011zr, author = {Govan, D. and Palmisano, S. A. and Allison, R. S. and Field, M.}, booktitle = {38th Australasian Experimental Psychology Conference}, date-added = {2012-08-13 20:07:23 +0000}, date-modified = {2012-08-13 20:07:23 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {Effects of Realistic Simulated Linear and Rotary Viewpoint Jitter on Vection}, year = {2011}}
@incollection{Allison:2011fk, abstract = {The renaissance of stereoscopic three-dimensional (S3D) film requires that film-makers revisit assumptions and conventions about factors that influence the visual appreciation and impact of their medium. Capture, post-production and exhibition of stereoscopic content is subject to a number of artefacts and imperfections that impact the viewer experience. This talk will discuss a variety of these issues and their implications for depth perception, visual comfort and sense of scale. }, author = {Robert S. Allison}, booktitle = {Toronto International Stereoscopic 3D Conference}, date-added = {2012-08-13 19:35:44 +0000}, date-modified = {2012-08-13 19:39:28 +0000}, keywords = {Stereopsis}, month = {06}, title = {Perceptual Artifacts in Stereoscopic 3D Film}, url-1 = {http://www.etcenter.org/2011/04/the-smpte-second-annual-international-conference-on-stereoscopic-3d-for-media-entertainment/}, year = {2011}}
@incollection{Allison:2011uq, abstract = {Stereoscopic display adds a compelling tool to the arsenal of techniques that artists can use to create the sense of three-dimensional space in film and other media. In stereo media, as in the real world, people combine the cues to depth to form a coherent perception of the 3D environment. In S3D media, depth cues do not typically correspond to what the viewer would experience in a given scene and are also not in agreement with each other. I will review what vision science tells us about how depth cues are integrated and what happens when they conflict. I will also discuss the role of cue interaction in choosing the configuration of rigs and displays, and how cue interactions create common distortions experienced in S3D media.}, annote = {June 21-22 in New York}, author = {Allison, R.S.}, booktitle = {{SMPTE} International Conference on Stereoscopic 3D for Media and Entertainment}, date-added = {2011-08-10 13:25:07 -0400}, date-modified = {2012-07-03 00:00:33 -0400}, keywords = {Stereopsis}, month = {06}, title = {Depth cue interactions in stereoscopic 3D media}, year = {2011}}
@incollection{Benzeroual:2011fk, abstract = {The geometry of stereopsis makes straightforward predictions regarding the effect of increasing an observer's simulated interocular distance (IO) on perceived depth. Our aim is to characterize the effect of IO on perceived depth, and its dependence on scene complexity and screen size. In Experiment 1 we used S3D movies of an indoor scene, shot with three camera separations (0.25'', 1'' and 1.7''). We displayed this footage on two screens (54'' and 22'') maintaining a constant visual angle. A reference scene with an IO of 1'' was displayed for 5s followed by the test scene. Participants (n=10) were asked to estimate the distances between four pairs of objects in the scene relative to the reference. Contrary to expectations, there was no consistent effect of IO, and all participants perceived more depth on the smaller screen. In Experiment 2 we used static line stimuli, with no real-world context. The same set of conditions was evaluated; all observers now perceived more depth in the larger display and there was a clear dependence on IO. The presence of multiple realistic depth cues has significant and complex effects on perceived depth from binocular disparity; effects that are not obvious from binocular geometry.}, address = {Toulouse, France}, author = {Benzeroual, K. and Laldin, S. and Allison, R.S. and Wilcox, L.M.}, booktitle = {The 34th European Conference on Visual Perception, Perception}, date-added = {2011-08-10 11:44:04 -0400}, date-modified = {2014-09-09 19:16:31 +0000}, keywords = {Stereopsis}, month = {August 28--Sept. 1}, number = {ECVP Abstract Supplement}, pages = {104}, title = {The effect of interocular separation on perceived depth from disparity in complex scenes}, url = {http://www.perceptionweb.com/abstract.cgi?id=v110454}, volume = {40}, year = {2011}, url-1 = {http://www.perceptionweb.com/abstract.cgi?id=v110454}}
@incollection{Milner:2011vn, author = {Andrew Milner and Robert S. Allison}, booktitle = {CVR 2011: Plastic Vision}, date-added = {2011-06-15 15:12:15 -0400}, date-modified = {2012-07-02 22:45:51 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {June 15-18}, organization = {Centre for Vision Research}, pages = {G10}, title = {Early Fire Detection: The FireHawk System}, year = {2011}}
@incollection{Laldin:2011uq, author = {Laldin, S.R. and Benzeroual, K. and Allison, R.S. and Wilcox, L.M.}, booktitle = {CVR 2011: Plastic Vision}, date-added = {2011-06-15 15:05:26 -0400}, date-modified = {2011-06-15 15:09:51 -0400}, keywords = {Depth perception}, month = {06}, pages = {C6}, title = {Perceptual effects of Geometric Parameters while viewing complex {S3D} Scenes}, year = {2011}}
@incollection{Guterman:2011kx, author = {Pearl S. Guterman and Robert S. Allison and James E. Zacher and Stephen A. Palmisano}, booktitle = {CVR 2011: Plastic Vision}, date-added = {2011-06-05 21:40:57 -0400}, date-modified = {2011-06-15 15:12:34 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {June 15-18}, organization = {Centre for Vision Research}, pages = {G11}, title = {The role of body posture in the perception of self-motion}, year = {2011}}
@incollection{Bogdan2011, author = {Bogdan, N. and Allison, R.S. and Suryakumar, R.}, booktitle = {CVR 2011: Plastic Vision}, date-added = {2011-05-11 11:27:28 -0400}, date-modified = {2011-06-15 15:11:37 -0400}, keywords = {Eye Movements & Tracking}, month = {June 15-18}, organization = {Centre for Vision Research}, pages = {G1}, title = {Infrared Based Near Triad Tracking System}, year = {2011}}
@incollection{Vinnikov:2011yi, author = {Vinnikov, M. and Allison, R.S.}, booktitle = {CVR 2011: Plastic Vision}, date-added = {2011-05-11 11:25:46 -0400}, date-modified = {2011-06-15 19:10:24 +0000}, keywords = {Eye Movements & Tracking}, month = {June 15-18}, pages = {E9}, title = {Gaze-Contingent Real-Time Visual Field Simulations}, year = {2011}}
@incollection{Zacher2011, abstract = {Incorporating jitter or oscillation of the vantage point in visual displays produces more compelling illusions of self-motion (vection), despite generating greater sensory conflicts [1]. We are working with the Canadian Space Agency to develop an experiment to study this phenomenon on the International Space Station. Pragmatic issues favour small, near displays rather than typical immersive displays. This paper studies the impact of display characteristics on the jitter/oscillation enhancement of vection. METHODS Visual displays simulated constant velocity forward motion at 1.33 m/s through a virtual world, or the same motion with simulated viewpoint oscillation, on a laptop monitor viewed through an aperture. Various experiments examined the effect of oscillation amplitude, direction, field of view (with a different monitor), focal distance and body posture on vection responses. RESULTS Adding simulated horizontal or vertical viewpoint oscillation to radial flow increased vection a similar amount. Vection strength was increased more for oscillation peak velocities of 0.28 m/s compared to 0.09 m/s. Increasing focal distance by the use of +2D ophthalmic lenses did not measurably impact reported strength of vection. While field of view had no effect, closer viewing distances reduced vection but had no significant effect on the oscillation enhancement. DISCUSSION Motion sickness and spatial disorientation continue to impact the availability and effectiveness of astronauts. The current results will guide the development of ISS studies to improve our understanding of how vestibular and visual signals are recalibrated in altered gravity. REFERENCES [1] Palmisano, S., Allison, R.S. and Pekin, F. (2008) Perception, 37, 22--33.}, address = {Houston, Texas}, author = {Zacher, J.E. and Guterman, P.S. and Palmisano, S.A. and Allison, R.S.}, booktitle = {Journal of Vestibular Research (8th Symposium on the Role of the Vestibular Organs in Space Exploration)}, date-added = {2011-05-11 11:20:56 -0400}, date-modified = {2011-10-28 21:44:49 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {04}, pages = {82}, title = {Enhancements of Vection in Depth from Viewpoint Oscillation: Effects of Field of View, Amplitude, Focal Distance and Body Posture}, volume = {21}, year = {2011}}
@incollection{Vinnikov:2011ec, author = {Vinnikov, M. and Allison, R.S.}, booktitle = {1st IEEE Canada Women in Engineering National Conference}, date-added = {2011-05-11 11:18:00 -0400}, date-modified = {2011-05-18 15:57:25 -0400}, keywords = {Eye Movements & Tracking}, month = {04}, title = {Gaze Contingent Real-Time Visual Simulations}, year = {2011}}
@incollection{Rushton:2011ht, abstract = {How do humans visually guide themselves towards a target? The traditional account (e.g., Gibson J J, 1966, The Senses Considered as Perceptual Systems (Boston: Houghton-Mifflin); Warren W H, Hannon D J, 1988, Nature, 336, 162-163) was based on the use of optic flow: the observer regulates his or her direction of walking so as to align the focus of expansion with the target object. Work over the past 13 years points to the use of a different strategy: the observer keeps the target perceptually straight-ahead (Rushton S K et al, 1998, Current Biology, 8, 1191-1194). The information required to keep an object straight-ahead is the current direction of an object relative to the body, its ``egocentric direction''. Although egocentric direction is a very simple source of information, modelling shows that it allows for quite sophisticated locomotor behaviour. What of the use of optic flow? Our recent work has shown that it plays an important role in maintaining calibration. Egocentric direction is derived in part from eye orientation and head orientation signals. These signals are prone to drift. It appears the brain keeps perception of egocentric direction calibrated by comparing predictions of the optic flow that will result from any given walking movement against the optic flow that actually results. Any discrepancy then drives a recalibration process. Thus optic flow does contribute to the visual guidance of walking, but indirectly through the recalibration of egocentric direction.}, author = {Rushton, S.K. and Herlihey, T.A. and Allison, R.S.}, booktitle = {AVA/BMVA Meeting on Biological and Computer Vision}, date-added = {2011-05-11 11:15:14 -0400}, date-modified = {2011-05-24 10:07:39 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {05}, organization = {School of Psychology, Cardiff University}, title = {The use of egocentric direction and optic flow in the visual guidance of walking}, year = {2011}}
@incollection{Tsirlin:2011tb, abstract = {Nakayama and Shimojo (1990) demonstrated that quantitative depth percepts could be generated by monocular occlusions, a phenomenon they called da Vinci stereopsis. They used a configuration where a monocular bar was placed to one side of a binocular rectangle. When an occlusion interpretation was possible, the bar appeared behind the rectangle at a distance that increased as the lateral separation between the bar and the rectangle increased. Gillam, Cook and Blackburn (2003) argued that quantitative depth perception in da Vinci stereopsis was due to double-matching of the bar with the edge of the rectangle. They showed that when the monocular bar was replaced with a monocular dot only qualitative depth percepts remained. However, their stimulus differed from the original in ways that promoted double-matching and the range of separations of the monocular feature from the rectangle was different for the bar and the dot. To evaluate the contributions of monocular occlusions and double-matching to quantitative depth percepts in da Vinci arrangements, we have replicated and extended the Nakayama and Shimojo and Gillam et al. experiments. We reproduced the original stimuli precisely and used the same range of separations for the bar stimuli as for the dot stimuli. We also compared perceived depth from disparity in the bar and dot stimuli when they were presented binocularly. Three of six observers were able to see quantitative depth with the dot stimulus though less depth was perceived than when a monocular bar was used. Interestingly, we found a similar difference in perceived depth when the bar and the dot were presented binocularly. Taken together our results provide evidence that quantitative depth in da Vinci arrangements is based, at least in part, on monocular occlusions, and that this phenomenon depends on the properties of the monocular object and is subject to inter-observer differences.}, author = {Tsirlin, I. and Wilcox, L. M. and Allison, R.S.}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-added = {2011-05-11 11:10:54 -0400}, date-modified = {2012-07-02 19:00:04 -0400}, doi = {10.1167/11.11.337}, keywords = {Stereopsis}, number = {11}, organization = {Vision Sciences Society}, pages = {337}, title = {Decoding da Vinci: quantitative depth from monocular occlusions}, volume = {11}, year = {2011}, url-1 = {https://doi.org/10.1167/11.11.337}}
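A note on the geometry (a standard small-angle sketch in my own notation, not taken from the abstract above): if a monocular element sits at angular separation \(\alpha\) from the occluding edge, with viewing distance \(D\) and interocular distance \(I\), then for the element to be hidden from one eye it must lie at least

\[
\Delta_{\min} \;=\; \frac{\alpha D^{2}}{I - \alpha D} \;\approx\; \frac{\alpha D^{2}}{I}
\]

behind the occluder. The separation \(\alpha\) plays the same geometric role as a disparity of the same size, which is why perceived depth in these arrangements can plausibly scale with lateral separation.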
@inproceedings{Benzeroual:2011uq, abstract = {A primary concern when making stereoscopic 3D (S3D) movies is to promote an effective and comfortable S3D experience for the audience when displayed on the screen. The amount of depth produced on-screen can be controlled using a variety of parameters. Many of these are lighting related such as lighting architecture and technology. Others are optical or positional and thus have a geometrical effect including camera interaxial distance, camera convergence, lens properties, viewing distance and angle, screen/projector properties and viewer anatomy (interocular distance). The amount of estimated depth from disparity alone can be precisely predicted from simple trigonometry; however, perceived depth from disparity in complex scenes is difficult to evaluate and most likely different from the predicted depth based on geometry. This discrepancy is mediated by perceptual and cognitive factors, including resolution of the combination/conflict of pictorial, motion and binocular depth cues. This paper will review geometric predictions of depth from disparity and present the results of experiments which assess perceived S3D depth and the effect of the complexity of scene content.}, address = {Liege, Belgium}, author = {Benzeroual, K. and Wilcox, L.M. and Kazimi, A. and Allison, R. S.}, booktitle = {IC3D 2011}, date-added = {2012-04-30 18:56:17 -0400}, date-modified = {2014-09-26 02:24:54 +0000}, doi = {10.1109/IC3D.2011.6584389}, keywords = {Stereopsis}, month = {12}, pages = {59.1-59.8}, title = {On the Distinction between Perceived and Predicted Depth in {S3D} Films}, url = {http://percept.eecs.yorku.ca/papers/06584389.pdf}, year = {2011}, url-1 = {https://doi.org/10.1109/IC3D.2011.6584389}}
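As a concrete illustration of the "simple trigonometry" the abstract refers to (the standard stereoscopic display geometry; notation mine, not quoted from the paper): for a viewer with interocular separation \(e\) seated at distance \(V\) from the screen, a point rendered with uncrossed screen parallax \(p\) is predicted to appear at distance

\[
Z \;=\; \frac{eV}{e - p}, \qquad z \;=\; Z - V \;=\; \frac{pV}{e - p}
\]

behind the screen plane. For example, \(e = 0.065\) m, \(V = 10\) m and \(p = 0.01\) m predict \(z \approx 1.8\) m; the paper's point is that perceived depth in complex scenes systematically departs from such predictions.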
@inproceedings{Benzeroual:2011yt, abstract = {In S3D film, many factors affect the relationship between the depth in the acquired scene and depth eventually produced by the stereoscopic display. Many are geometric including camera interaxial, camera convergence, lens properties, viewing distance and angle, screen/projector properties and anatomy (interocular). Spatial distortions follow at least in part from geometry (including the cardboard cut-out effect, miniaturization/gigantism, space-size distortion, and object-speed distortion), and can cause a poor S3D experience. However, it is naive to expect spatial distortion to be specified only by geometry --- visual experience is heavily influenced by perceptual and cognitive factors. This paper will review geometrical predictions and present the results of experiments which assess S3D distortions in the context of content, cognitive and perceptual influences, and individual differences. We will suggest ways to assess the influence of acquisition and display parameters and to mitigate unwanted perceptual phenomena.}, annote = {New York, June 21-22, 2011}, author = {Benzeroual, K. and Allison, R.S. and Wilcox, L.M.}, booktitle = {{SMPTE} International Conference on Stereoscopic 3D for Media and Entertainment, {SMPTE} Conf. Proc.}, date-added = {2011-05-11 11:05:31 -0400}, date-modified = {2016-01-03 03:26:01 +0000}, doi = {10.5594/M001420}, keywords = {Stereopsis}, month = {June 21-22}, pages = {1-10}, publisher = {{SMPTE}}, title = {Distortions of Space in Stereoscopic 3D Content}, volume = {2011 (no. 6)}, year = {2011}, url-1 = {https://doi.org/10.5594/M001420}}
@inproceedings{Banks:2011fk, abstract = {Stereoscopic displays have become very important for many applications, including operation of remote devices, medical imaging, surgery, scientific visualization, computer-assisted design, and more. But the most significant and exciting development is the incorporation of stereo technology into entertainment: specifically, cinema, television, and video games. It is important in these applications for stereo 3D imagery to create a faithful impression of the 3D structure of the scene being portrayed. It is also important that the viewer is comfortable and does not leave the experience with eye fatigue or a headache, and that the presentation of the stereo images does not create temporal artifacts like flicker or motion judder. Here we review current research on stereo human vision and how it informs us about how best to create and present stereo 3D imagery. The paper is divided into four parts: 1) Getting the geometry right; 2) depth cue interactions in stereo 3D media; 3) focusing and fixating on stereo images; and 4) temporal presentation protocols: Flicker, motion artifacts, and depth distortion.}, author = {Martin S. Banks and Jenny R. Read and Robert S. Allison and Simon J. Watt}, booktitle = {{SMPTE} International Conference on Stereoscopic 3D for Media and Entertainment, {SMPTE} Conf. Proc.}, date-added = {2011-05-11 11:03:41 -0400}, date-modified = {2015-07-21 12:31:30 +0000}, doi = {10.5594/M001418}, keywords = {Stereopsis}, pages = {2-31}, title = {Stereoscopy and the Human Visual System}, volume = {2011 (no. 6)}, year = {2011}, url-1 = {https://doi.org/10.5594/M001418}, url-2 = {http://www.etcenter.org/2011/04/the-smpte-second-annual-international-conference-on-stereoscopic-3d-for-media-entertainment/}}
@inproceedings{Vinnikov:2011yj, abstract = {We have developed two novel evaluation techniques for gaze-contingent systems that simulate visual defects. These two techniques can be used to quantify simulated visual defects in visual distortion and visual blur. Experiments demonstrated that such techniques could be useful for quantification of visual field defects to set simulation parameters. They are also useful for quantitative evaluation of simulation fidelity based on measurement of the functional relation between the intended simulated defect and psychophysical results.}, address = {Palo Alto, California}, author = {Vinnikov, M. and Allison, R.S.}, booktitle = {2nd Workshop on Eye Gaze in Intelligent Human Machine Interaction}, date-added = {2011-05-09 12:33:17 -0400}, date-modified = {2011-05-18 15:56:28 -0400}, keywords = {Eye Movements & Tracking}, month = {02}, organization = {Stanford University}, title = {Evaluation of Simulated Visual Impairment}, url-1 = {http://www.ci.seikei.ac.jp/nakano/GAZEWS_IUI2011/proceedings/[9-6]MargaritaVinnikov_shortpaper.pdf}, year = {2011}}
@inproceedings{Tsirlin:2011qh, abstract = {Stereoscopic displays must present separate images to the viewer's left and right eyes. Crosstalk is the unwanted contamination of one eye's image from the image of the other eye. It has been shown to cause distortions, reduce image quality and visual comfort and increase perceived workload when performing visual tasks. Crosstalk also affects one's ability to perceive stereoscopic depth although little consideration has been given to the perception of depth magnitude in the presence of crosstalk. In this paper we extend a previous study (Tsirlin, Allison \& Wilcox, 2010, submitted) on the perception of depth magnitude in stereoscopic occluding and non-occluding surfaces to the special case of crosstalk in thin structures. Crosstalk in thin structures differs qualitatively from that in larger objects due to the separation of the ghost and real images and thus theoretically could have distinct perceptual consequences. To address this question we used a psychophysical paradigm, where observers estimated the perceived depth difference between two thin vertical bars using a measurement scale. Our data show that crosstalk degrades perceived depth. As crosstalk levels increased the magnitude of perceived depth decreased, especially for stimuli with larger relative disparities. In contrast to the effect of crosstalk on depth magnitude in larger objects, in thin structures, a significant detrimental effect was found at all disparities. Our findings, when considered with the other perceptual consequences of crosstalk, suggest that its presence in S3D media even in modest amounts will reduce observers' satisfaction.}, author = {Tsirlin, I. and Allison, R.S. and Wilcox, L.M.}, booktitle = {Electronic Imaging: Stereoscopic Displays and Applications (an updated version appears in Journal of Electronic Imaging)}, date-added = {2011-05-09 12:31:15 -0400}, date-modified = {2016-01-03 03:23:02 +0000}, doi = {10.1117/12.872141}, keywords = {Stereopsis}, pages = {786313, 1-10}, title = {The effect of crosstalk on depth magnitude in thin structures}, volume = {7863}, year = {2011}, url-1 = {https://doi.org/10.1117/12.872141}}
@techreport{Allison:2011kx, author = {Allison, R.S. and Zacher, J.E.}, date-added = {2011-08-30 23:01:26 -0400}, date-modified = {2012-03-11 20:49:21 -0400}, institution = {Canadian Space Agency Space Station Program}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {CSA-XXX}, title = {Test Equipment Data Package for the Falcon-20 C-JITTER Experiment}, year = {2011}}
@techreport{Wilcox:2011uq, author = {Wilcox, L.M. and Allison, R.S.}, date-added = {2011-08-30 22:59:13 -0400}, date-modified = {2011-08-30 22:59:53 -0400}, institution = {report on OCE Project: YO-CR-10059-08}, keywords = {Misc.}, title = {The effects of brightness on viewer preferences in an emerging display technology}, year = {2011}}
@techreport{Wilcox:2011fk, author = {Wilcox, L.M. and Irving, E.L. and Allison, R.S.}, date-added = {2011-08-30 22:58:09 -0400}, date-modified = {2011-08-30 22:58:35 -0400}, institution = {prepared for Christie Digital Systems}, keywords = {Misc.}, title = {Sensitivity to optical distortions in Laser-based projection systems}, year = {2011}}
@techreport{Tomkins:2011lf, address = {Toronto, Canada}, author = {Tomkins, L. and Andriychuk, T. and Zacher, J. E. and Ballagh, M. and McAlpine, R. and Doig, T. and Craig, G. and Filliter, D. and Milner, A. and Allison, R.S.}, date-added = {2011-05-11 11:28:41 -0400}, date-modified = {2011-05-18 16:13:32 -0400}, institution = {York University}, keywords = {Night Vision}, number = {Technical Report CSE-2011-02}, title = {Report on the Sudbury NVD-Aided Aerial Forest Fire Detection Trials held during the Summer of 2010}, year = {2011}}
@article{allison201018, abstract = {We examined the eye movements of pilots as they carried out simulated aircraft landings under day and night lighting conditions. Our five students and five certified pilots were instructed to quickly achieve and then maintain a constant 3-degree glideslope relative to the runway. However, both groups of pilots were found to make significant glideslope control errors, especially during simulated night approaches. We found that pilot gaze was directed most often toward the runway and to the ground region located immediately in front of the runway, compared to other visual scene features. In general, their gaze was skewed toward the near half of the runway and tended to follow the runway threshold as it moved on the screen. Contrary to expectations, pilot gaze was not consistently directed at the aircraft's simulated aimpoint (i.e., its predicted future touchdown point based on scene motion). However, pilots did tend to fly the aircraft so that this point was aligned with the runway threshold. We conclude that the supplementary out-of-cockpit visual cues available during day landing conditions facilitated glideslope control performance. The available evidence suggests that these supplementary visual cues are acquired through peripheral vision, without the need for active fixation.}, author = {Kim, J. and Palmisano, S. and Ash, A. and Allison, R.S.}, date-added = {2011-05-06 11:38:11 -0400}, date-modified = {2012-07-02 17:28:59 -0400}, doi = {10.1145/1773965.1773969}, journal = {{ACM} Transactions on Applied Perception}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {3}, pages = {18 pages}, title = {Pilot Gaze and Glideslope Control During Simulated Aircraft Landings}, volume = {7}, year = {2010}, url-1 = {https://doi.org/10.1145/1773965.1773969}}
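For readers outside aviation: a 3-degree glideslope is a simple height-distance line (a worked example, not taken from the paper),

\[
h \;=\; d \tan 3^{\circ} \;\approx\; 0.052\,d,
\]

so an aircraft on glideslope should be roughly 97 m (about 320 ft) above the touchdown zone when 1 nautical mile (1852 m) out. The glideslope control errors reported above are deviations from this line.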
@article{Wilkie:2010lt, abstract = {In 1958, JJ Gibson put forward proposals on the visual control of locomotion. Research in the last 50 years has served to clarify the sources of visual and nonvisual information that contribute to successful steering, but has yet to determine how this information is optimally combined under conditions of uncertainty. Here, we test the conditions under which a locomotor robot with a mobile camera can steer effectively using simple visual and extra-retinal parameters to examine how such models cope with the noisy real-world visual and motor estimates that are available to humans. This applied modeling gives us an insight into both the advantages and limitations of using active gaze to sample information when steering.}, author = {Wilkie, R.M. and Wann, J.P. and Allison, R.S.}, date-added = {2011-05-06 11:32:47 -0400}, date-modified = {2012-07-02 13:42:36 -0400}, doi = {10.1145/1870076.1870077}, journal = {{ACM} Transactions on Applied Perception}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {2}, pages = {Article 9, 1-18}, title = {Modelling Locomotor Control: the advantages of a Mobile Gaze}, volume = {8}, year = {2010}, url-1 = {https://doi.org/10.1145/1870076.1870077}}
@article{allison201012, abstract = {Recent experiments have established that monocular areas arising due to occlusion of one object by another contribute to stereoscopic depth perception. It has been suggested that the primary role of monocular occlusions is to define depth discontinuities and object boundaries in depth. Here we use a carefully designed stimulus to demonstrate empirically that monocular occlusions play an important role in localizing depth edges and defining the shape of the occluding surfaces in depth. We show that the depth perceived via occlusion in our stimuli is not due to the presence of binocular disparity at the boundary and discuss the quantitative nature of depth perception in our stimuli. Our data suggest that the visual system can use monocular information to estimate not only the sign of the depth of the occluding surface but also its magnitude. We also provide preliminary evidence that perceived depth of illusory occluders derived from monocular information can be biased by binocular features.}, author = {Tsirlin, I. and Wilcox, L. M. and Allison, R.S.}, date-modified = {2012-07-02 17:29:58 -0400}, doi = {10.1167/10.6.11}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {6:11}, pages = {1-12}, title = {Monocular occlusions determine the perceived shape and depth of occluding surfaces}, volume = {10}, year = {2010}, url-1 = {https://doi.org/10.1167/10.6.11}}
@article{allison2010349-355, abstract = {Unrestricted positioning of elements in random-dot stereograms with steep disparity gradients, such as stereo-transparent stereograms depicting overlaid surfaces, can produce perceptual artifacts similar to disparity noise. It is shown that these artifacts hinder the segregation of overlaid surfaces in transparent random-dot stereograms and thus disrupt the perception of stereo-transparency. This effect is intensified with increases in the overall element density of the stimuli. We outline the origin of this phenomenon and discuss techniques to prevent such artifacts.}, author = {Tsirlin, I. and Wilcox, L. M. and Allison, R.S.}, date-modified = {2011-05-10 15:05:04 -0400}, doi = {10.1068/p6252}, journal = {Perception}, keywords = {Stereopsis}, number = {3}, pages = {349-355}, title = {Perceptual artifacts in random-dot stereograms}, volume = {39}, year = {2010}, url-1 = {https://doi.org/10.1068/p6252}}
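A minimal sketch (mine, not the authors' stimulus code) of the kind of element-placement constraint the abstract argues for: when building a two-plane transparent random-dot stereogram, reject dots that would land, after their disparity shift, nearly on top of a dot from the other plane in either half-image, since such near-coincidences behave like disparity noise.

import numpy as np

def transparent_rds(n_per_plane=300, size=512, disparities=(8, -8),
                    min_sep=4, seed=0):
    """Dot positions for a two-plane transparent random-dot stereogram.

    Returns (left, right): arrays of (x, y) dot centres for each eye.
    A candidate dot is rejected if, after its disparity shift, it would
    fall within min_sep pixels of an already-placed dot in either eye's
    image; unrestricted placement can create spurious near-coincident
    dots that act like disparity noise.
    """
    rng = np.random.default_rng(seed)
    left, right = [], []
    for d in disparities:
        placed = 0
        while placed < n_per_plane:
            x, y = rng.uniform(0, size, 2)
            xl, xr = x - d / 2.0, x + d / 2.0
            if any(abs(y - ly) < min_sep and abs(xl - lx) < min_sep
                   for lx, ly in left):
                continue
            if any(abs(y - ry) < min_sep and abs(xr - rx) < min_sep
                   for rx, ry in right):
                continue
            left.append((xl, y))
            right.append((xr, y))
            placed += 1
    return np.array(left), np.array(right)

Raising n_per_plane while holding size fixed increases overall element density and makes the rejection step matter more, mirroring the density effect reported in the abstract.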
@article{Palmisano:2010qy, abstract = {There has been no direct examination of stereoscopic depth perception at very large observation distances and depths. We measured perceptions of depth magnitude at distances where it is frequently reported, without evidence, that stereopsis is non-functional. We adapted methods pioneered at distances up to 9 m by R. S. Allison, B. J. Gillam, and E. Vecellio (2009) for use in a 381-m-long railway tunnel. Pairs of Light Emitting Diode (LED) targets were presented either in complete darkness or with the environment lit as far as the nearest LED (the observation distance). We found that binocular, but not monocular, estimates of the depth between pairs of LEDs increased with their physical depths up to the maximum depth separation tested (248 m). Binocular estimates of depth were much larger with a lit foreground than in darkness and increased as the observation distance increased from 20 to 40 m, indicating that binocular disparity can be scaled for much larger distances than previously realized. Since these observation distances were well beyond the range of vertical disparity and oculomotor cues, this scaling must rely on perspective cues. We also ran control experiments at smaller distances, which showed that estimates of depth and distance correlate poorly and that our metric estimation method gives similar results to a comparison method under the same conditions.}, author = {Palmisano, S. and Gillam, B. and Govan, D. G. and Allison, R.S. and Harris, J. M.}, date-modified = {2012-07-02 17:28:11 -0400}, doi = {10.1167/10.6.19}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {6}, pages = {16}, title = {Stereoscopic perception of real depths at large distances}, volume = {10}, year = {2010}, url-1 = {https://doi.org/10.1167/10.6.19}}
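The scaling result above can be read against the standard small-angle relation between a relative disparity \(\delta\) (in radians), observation distance \(D\) and interocular distance \(I\) (a textbook approximation, not quoted from the paper):

\[
\Delta d \;\approx\; \frac{\delta D^{2}}{I}.
\]

At \(D = 40\) m with \(I = 0.065\) m, a disparity of only 10 arcsec (about \(4.8 \times 10^{-5}\) rad) corresponds to roughly 1.2 m of depth; the quadratic dependence on \(D\) is why correctly scaled depth at such distances requires distance information, here argued to come from perspective cues.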
@article{allison201025-30, abstract = {Chuck Oman has been a guide and mentor for research in human perception and performance during space exploration for over 25 years. His research has provided a solid foundation for our understanding of how humans cope with the challenges and ambiguities of sensation and perception in space. In many of the environments associated with work in space the human visual system must operate with unusual combinations of visual and other perceptual cues. On Earth physical acceleration cues are normally available to assist the visual system in interpreting static and dynamic visual features. Here we consider two cases where the visual system is not assisted by such cues. Our first experiment examines perceptual stability when the normally available physical cues to linear acceleration are absent. Our second experiment examines perceived orientation when there is no assistance from the physically sensed direction of gravity. In both cases the effectiveness of vision is paradoxically reduced in the absence of physical acceleration cues. The reluctance to rely heavily on vision represents an important human factors challenge to efficient performance in the space environment.}, author = {Harris, L. R. and Jenkin, M. and Jenkin, H. and Dyde, R. and Zacher, J. and Allison, R.S.}, date-modified = {2011-05-22 13:21:30 -0400}, doi = {10.3233/VES-2010-0352}, journal = {Journal of Vestibular Research-Equilibrium and Orientation}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {1-2}, pages = {25-30}, title = {The unassisted visual system on earth and in space}, volume = {20}, year = {2010}, url-1 = {https://doi.org/10.3233/VES-2010-0352}}
@article{allison2010136-143, abstract = {We hypothesized that it is the conflict between various cues to distance that has produced results purportedly showing that vergence eye movements induced by disparity change are not an effective cue for depth. Single and compound stimuli were used to examine the perceived motion in depth (MID) produced by simulated motion oscillations specified by disparity, relative disparity, and/or looming. Estimations of the extent of MID and binocularly recorded eye movements showed that the vergence induced by disparity change is indeed an effective cue for motion in depth in conditions where looming information does not conflict with it. When looming and disparity are in conflict, looming is the stronger cue.}, author = {Gonzalez, E. G. and Allison, R.S. and Ono, H. and Vinnikov, M.}, date-modified = {2011-05-10 14:44:22 -0400}, doi = {10.1016/j.visres.2009.11.005}, journal = {Vision Research}, keywords = {Motion in depth}, number = {2}, pages = {136-143}, title = {Cue conflict between disparity change and looming in the perception of motion in depth}, volume = {50}, year = {2010}, url-1 = {https://doi.org/10.1016/j.visres.2009.11.005}}
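The two cues that were decoupled in this study have standard first-order forms (notation mine): for an object of physical size \(S\) at distance \(D\) approaching at closing speed \(v\), the changing-disparity and looming signals are approximately

\[
\frac{d\delta}{dt} \;\approx\; \frac{I\,v}{D^{2}}, \qquad \frac{d\theta}{dt} \;\approx\; \frac{S\,v}{D^{2}},
\]

with \(I\) the interocular distance. Their ratio, \(I/S\), is independent of distance, so a simulation can drive the two signals consistently or, as here, put them in conflict by varying one while holding the other fixed.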
@article{allison2010155-169, abstract = {Sensitivity to many visual stimuli, and, in particular, image displacement, is reduced during a change in fixation (saccade) compared to when the eye is still. In these experiments, we studied the sensitivity of observers to ecologically relevant image translations of large, complex, real world scenes either during horizontal saccades or during fixation. In the first experiment, we found that such displacements were much less detectable during saccades than during fixation. Qualitatively, even when trans-saccadic scene changes were detectible, they were less salient and appeared slower than equivalent changes in the absence of a saccade. Two further experiments followed up on this observation and estimated the perceived magnitude of trans-saccadic apparent motion using a two-interval forced-choice procedure (Experiment 2) and a magnitude estimation procedure (Experiment 3). Both experiments suggest that trans-saccadic displacements were perceived as smaller than equivalent inter-saccadic displacements. We conclude that during saccades, the magnitude of the apparent motion signal is attenuated as well as its detectability.}, author = {Allison, R.S. and Schumacher, J. and Sadr, S. and Herpers, R.}, date-modified = {2011-05-11 13:32:40 -0400}, doi = {10.1007/s00221-009-2120-y}, journal = {Experimental Brain Research}, keywords = {Eye Movements & Tracking}, number = {1}, pages = {155-169}, title = {Apparent motion during saccadic suppression periods}, volume = {202}, year = {2010}, url-1 = {https://doi.org/10.1007/s00221-009-2120-y}}
@inbook{Allison:2010yg, abstract = {In modern Night Vision Devices (NVDs) `halo' around bright light sources remains a salient imaging artifact. Although a common feature of image intensified imagery, little is known of the perceptual and operational effects of this device limitation. This paper describes two related sets of experiments. In the first set of experiments, we provide quantitative measurements of Night Vision Device (NVD) halos formed by light sources as a function of intensity and distance. This characterization allows for analysis of the possible effects of halo on human perception through NVDs. In the second set of experiments, the effects of halation on the perception of depth and environmental layout are investigated psychophysically. The custom simulation environment used and results from psychophysical experiments designed to analyze halo-induced errors in slope estimation are presented. Accurate simulation of image intensifier physics and NVD scene modeling is challenging and computationally demanding, yet needs to be performed in real-time at high frame rates and at high resolution in advanced military simulators. Given the constraints of the real-time simulation, it is important to understand how NVD artifacts impact task performance in order to make rational engineering decisions about the required level of fidelity of the NVD simulation. A salient artifact of NVD viewing is halo, the phenomenon where the image of a bright light source appears surrounded by a disc-like halo. High-fidelity physical modeling of these halo phenomena would be computationally expensive. To evaluate the level of approximation that would be sufficient for training purposes, human factors data are required. NVD halos generated by light sources in a scene have a size that is approximately invariant with intensity and distance. Objective and subjective measures of halo geometry indicate that halo size, when halo is present, is relatively invariant of target distance or intensity. This property results in perceptual distortions and strong illusions with isolated stimuli. In complex scenes, systematic distortions of slant are predicted due to an imposed texture gradient created by the halo. We investigated this hypothesis in psychophysical experiments. The results suggest that perception of slant and glideslope in complex scenes is remarkably tolerant of texture gradients imposed by NVG halo. These results are discussed in terms of NVG simulation and of the ability of human operators to compensate for perceptual distortions.}, address = {New York, NY}, author = {Allison, R.S. and Brandwood, T. and Vinnikov, M. and Zacher, J.E. and Jennings, S. and Macuda, T. and Thomas, P.J. and Palmisano, S.A.}, booktitle = {Vision and Displays for Military and Security Applications: the Advanced Deployable Day/Night Simulation Project}, date-added = {2011-05-06 10:53:34 -0400}, date-modified = {2014-09-26 02:19:07 +0000}, doi = {10.1007/978-1-4419-1723-2_10}, editor = {K. Niall}, keywords = {Night Vision}, pages = {123-140}, publisher = {Springer-Verlag}, rating = {2}, title = {Psychophysics of night vision device halo}, year = {2010}, url-1 = {https://doi.org/10.1007/978-1-4419-1723-2_10}}
@incollection{Govan:2010ys, author = {Govan, D. and Gillam, B. and Palmisano, S. A. and Allison, R. S.}, booktitle = {37th Australasian Experimental Psychology Conference}, date-added = {2012-08-13 20:01:55 +0000}, date-modified = {2012-08-13 20:01:55 +0000}, keywords = {Stereopsis}, title = {Comparing depth interval estimates with motion parallax and stereopsis at distances beyond interaction space}, year = {2010}}
@incollection{Tsirlin:2010rp, author = {Tsirlin, I. and Wilcox, L. M. and Allison, R.S.}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-added = {2011-05-06 14:54:55 -0400}, date-modified = {2012-07-02 13:42:36 -0400}, doi = {10.1167/10.7.373}, keywords = {Depth perception}, number = {7}, pages = {373}, title = {Interactions between monocular occlusions and binocular disparity in the perceived depth of illusory surfaces}, volume = {10}, year = {2010}, url-1 = {https://doi.org/10.1167/10.7.373}}
@incollection{Bogdan:2010mb, abstract = {The oculomotor response when viewing a near target is characterized by `the near triad': pupil miosis (constriction), binocular convergence and increased accommodation. Most existing eye-tracking systems lack the ability to measure all three of these parameters and are usually specialized to handle only one. Systems that can measure the complete near triad suffer from slow measurement rates, off-line analysis or are cumbersome and inconvenient to use. Singular specialized systems are usually combined ad-hoc but such systems are often complex in architecture and suffer severe limitations in runtime. We describe a video-based eye tracking system based on eccentric photorefraction that allows for remote, high-speed measurement of all three components of the near triad. This provides for precise, simultaneous measurement of oculomotor dynamics as well as having the benefit of being safe and non-intrusive. An extended infrared source illuminated the subject's eye. The corneal reflex and `bright pupil' reflections of this source were imaged by an infrared sensitive camera and used to track gaze direction and pupil diameter. Such eccentric illumination combined with a knife-edge camera aperture allowed the accommodative state of the eye to be estimated from measurements of the gradient of image intensity across the pupil. Real-time measurements are facilitated by detection of Purkinje images to define areas of interest for each pupil followed by pupil edge detection and fitting to an ellipse model. Once the pupils are located, data about the brightness profile, diameter, corneal reflex and pupil center are extracted and processed to calculate the near triad. The system will be used in ongoing experiments assessing the role of oculomotor cues in perception of motion in depth.}, author = {Bogdan, N. and Allison, R.S. and Suryakumar, R.}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-added = {2011-05-06 14:52:40 -0400}, date-modified = {2012-07-02 13:42:36 -0400}, doi = {10.1167/10.7.507}, keywords = {Eye Movements & Tracking}, number = {7}, pages = {507}, title = {Infrared Tracking of the Near Triad}, volume = {10}, year = {2010}, url-1 = {https://doi.org/10.1167/10.7.507}}
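A toy sketch of the slope-based accommodation estimate described above (my own simplification, not the authors' implementation): in eccentric photorefraction, defocus is approximately proportional to the intensity gradient across the bright pupil over the instrument's working range, so a linear fit to the pupil brightness profile, scaled by an empirically calibrated constant, gives a refraction estimate. The constant k_cal here is hypothetical; a real system would calibrate it, e.g. with trial lenses of known power.

import numpy as np

def defocus_from_profile(intensity, k_cal=-0.2):
    """Estimate defocus (dioptres) from a 1-D brightness profile across the pupil.

    intensity : array of pixel intensities sampled along the pupil meridian
                (bright-pupil image)
    k_cal     : hypothetical calibration constant (dioptres per unit of
                normalised slope), measured empirically in a real system
    """
    x = np.linspace(-0.5, 0.5, intensity.size)   # pupil-normalised position
    slope = np.polyfit(x, intensity / intensity.mean(), 1)[0]
    return k_cal * slope

# Synthetic check: a tilted brightness ramp across the pupil
profile = 1.0 + 0.3 * np.linspace(-0.5, 0.5, 64)
print(defocus_from_profile(profile))   # -0.2 * 0.3 = -0.06 D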
@incollection{Allison:2010qt, address = {Toronto, Canada}, author = {Allison, R.S. and Wilcox, L.M. and Elder, J.}, booktitle = {SMPTE BOOT CAMP IV---The Next Dimension: 3D, Mobility and More}, date-added = {2011-05-06 14:39:03 -0400}, date-modified = {2011-05-18 15:46:28 -0400}, keywords = {Stereopsis}, month = {June 8th-9th}, title = {Depth of Field in Stereoscopic Moving Images}, url-1 = {https://wiki.cse.yorku.ca/lab/percept/_media/public:ryerson_workshop.pdf}, year = {2010}}
@incollection{Andriychuk:2010zk, address = {Kitchener-Waterloo, Canada}, author = {Andriychuk, T. and Tomkins, L. and Zacher, J. and Ballagh, M. and McAlpine, R. and Doig, T. and Jennings, S. and Milner, A. and Allison, R.S.}, booktitle = {Wildland Fire Canada 2010}, date-added = {2011-05-06 14:21:43 -0400}, date-modified = {2011-05-18 16:08:06 -0400}, keywords = {Night Vision}, month = {October 5th-7th}, title = {Night-Vision Device Aided Aerial Forest Fire Detection: Experience in a Controlled Test Grid}, url-1 = {https://percept.eecs.yorku.ca/papers/wildland fire 2010 abstract.pdf}, url-2 = {http://www.wildlandfirecanada.ca/Presentations/Tuesday/Afernoon/New Tech/Tetyana/NVG Presentation final draft.pdf}, url-3 = {http://www.wildlandfirecanada.ca/Presentations/Tuesday/Afernoon/New Tech/Tetyana/Tetyana.wmv}, year = {2010}}
@inproceedings{Guterman:2010sv, address = {Seattle, Washington}, author = {Guterman, P. and Fukuda, K. and Wilcox, L.M. and Allison, R.S.}, booktitle = {Society for Information Display Annual Meeting}, date-added = {2011-05-09 13:10:30 -0400}, date-modified = {2015-01-26 19:39:16 +0000}, keywords = {Misc.}, month = {05}, organization = {Society for Information Display}, title = {Is brighter always better? The effects of display and ambient luminance on preferences for digital signage}, url = {http://percept.eecs.yorku.ca/papers/SID_2010_Guterman-Final%20small.pdf}, year = {2010}}
@inproceedings{Wilcox:2010ja, address = {New York}, author = {Wilcox, L.M. and Tsirlin, I. and Allison, R.S.}, booktitle = {{SMPTE} International Conference on Stereoscopic 3D for Media and Entertainment}, date-added = {2011-05-09 13:08:42 -0400}, date-modified = {2015-01-26 19:35:03 +0000}, keywords = {Stereopsis}, month = {07}, title = {Sensitivity to monocular occlusions in stereoscopic imagery: Implications for {S3D} content creation, distribution and exhibition}, url = {http://percept.eecs.yorku.ca/papers/SMPTE%20manuscript%20LW%202010.pdf}, year = {2010}}
@inproceedings{Guterman:2010vg, address = {Padua, Italy}, author = {Guterman, P. and Allison, R.S. and Palmisano, S.A.}, booktitle = {Proceedings of the 26th Annual Meeting of the International Society for Psychophysics (ISP)}, date-added = {2011-05-09 13:07:10 -0400}, date-modified = {2011-05-18 16:28:52 -0400}, keywords = {Misc.}, month = {10}, title = {Using VPython for psychophysics}, url-1 = {http://yorku.academia.edu/PearlGuterman/Papers/319182/Using_VPython_for_psychophysics}, year = {2010}}
@inproceedings{Stemberger:2010qm, abstract = {As epitomized in DARPA's 'Augmented Cognition' program, next generation avionics suites are envisioned as sensing, inferring, responding to and ultimately enhancing the cognitive state and capabilities of the pilot. Inferring such complex behavioural states from imagery of the face is a challenging task and multimodal approaches have been favoured for robustness. We have developed and evaluated the feasibility of a system for estimation of cognitive workload levels based on analysis of facial skin temperature. The system is based on thermal infrared imaging of the face, head pose estimation, measurement of the temperature variation across regions of the face and an artificial neural network classifier. The technique was evaluated in a controlled laboratory experiment using subjective measures of workload across tasks as a standard. The system was capable of accurately classifying mental workload into high, medium and low workload levels 81\% of the time. The suitability of facial thermography for integration into a multimodal augmented cognition sensor suite is discussed.}, address = {Ottawa, Canada}, author = {Stemberger, J. and Allison, R.S. and Schnell, T.}, booktitle = {Seventh Canadian Conference on Computer and Robot Vision (CRV2010)}, date-added = {2011-05-06 12:12:04 -0400}, date-modified = {2011-05-18 16:22:26 -0400}, doi = {10.1109/CRV.2010.37}, keywords = {Neural Avionics}, month = {May 31st- June 2nd, 2010}, title = {Thermal imaging as a way to classify cognitive workload}, year = {2010}, url-1 = {https://doi.org/10.1109/CRV.2010.37}}
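The classification stage described above maps facial region temperatures to three workload classes. A minimal sketch of that kind of classifier (the placeholder data and feature layout are mine; the paper's actual network architecture and features are not reproduced here):

import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Placeholder feature matrix: one row per observation window; columns are
# hypothetical mean temperature changes (deg C) in facial regions, e.g.
# [forehead, nose, periorbital, cheek]. Labels 0/1/2 = low/medium/high load.
rng = np.random.default_rng(0)
X = rng.normal(size=(300, 4))
y = rng.integers(0, 3, size=300)

clf = make_pipeline(StandardScaler(),
                    MLPClassifier(hidden_layer_sizes=(16,),
                                  max_iter=2000, random_state=0))
clf.fit(X, y)
# Meaningless on random data, but shows the pipeline shape:
# region temperatures in, workload class out.
print(f"training accuracy: {clf.score(X, y):.2f}")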
@inproceedings{allison2010263-266, address = {Austin, Texas}, author = {Vinnikov, Margarita and Allison, Robert S.}, booktitle = {Proceedings of the 2010 Symposium on Eye-Tracking Research and Applications}, date-modified = {2015-01-26 19:41:10 +0000}, doi = {10.1145/1743666.1743728}, keywords = {Eye Movements & Tracking}, pages = {263-266}, publisher = {ACM}, title = {Contingency evaluation of gaze-contingent displays for real-time visual field simulations}, url = {http://percept.eecs.yorku.ca/papers/p263-vinnikov.pdf}, year = {2010}, url-1 = {https://doi.org/10.1145/1743666.1743728}}
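The core operation of a gaze-contingent visual field simulation, sketched here under my own assumptions (a real system renders this per frame with the latency characteristics the paper evaluates): composite a degraded version of the frame everywhere outside a gaze-centred region.

import numpy as np
from scipy.ndimage import gaussian_filter

def gaze_contingent_frame(frame, gaze_xy, radius=60, sigma=6):
    """Blur everything outside a circular gaze-centred region.

    frame   : 2-D grayscale image (H x W)
    gaze_xy : (x, y) current gaze position in pixels
    radius  : radius of the unblurred central region, in pixels
    sigma   : blur strength simulating peripheral field loss
    """
    h, w = frame.shape
    yy, xx = np.mgrid[0:h, 0:w]
    mask = (xx - gaze_xy[0]) ** 2 + (yy - gaze_xy[1]) ** 2 <= radius ** 2
    blurred = gaussian_filter(frame.astype(float), sigma)
    return np.where(mask, frame, blurred)

Swapping np.where(mask, frame, blurred) for np.where(mask, blurred, frame) turns the simulated peripheral field loss into a central scotoma; measuring how perception tracks the intended defect is the evaluation problem the paper addresses.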
@techreport{Andriychuk:2010tg, abstract = {Early detection of forest fires, while still in their emergent stages, could greatly improve suppression effectiveness and reduce overall costs. When used for aerial detection patrols, night vision devices (NVD) have potential to improve response times to potential starts and to improve sensitivity. The flight trials described in this report were designed to explore the potential for NVD aided detection in a real operational context but with experimental control and `ground truth' knowledge of the fire source. A series of flight trials were run April 22 to 25, 2010 in the vicinity of the city of Pembroke in the Ottawa Valley region of Eastern Ontario. Small test fires were set at known locations within the Ontario Ministry of Natural Resources (OMNR) infrared (IR) test grid and continuously monitored by remote data loggers. NVD flight detection patrols for an EC130 helicopter were planned in the region of the IR grid. The observers were the only members of the flight crew responsible for detecting fires and had no knowledge of the fire configuration or location. Each observer flew two detection patrols on separate nights with different configurations of sources. The average detection distance for a fire across all nights was 3,584m (95\%CI: 2,697m to 4,471m). The average discrimination distance, where a source could be confidently determined to be a fire or distracter, was 1,193m (95\%CI: 944m to 1,442m). The hit rate was 68\% over the course of the flight trials, higher than expectations based on the small fire sources and novice observers. The hit rate showed improvement over time, likely as observers became familiar with the task and terrain. There was only a single false alarm, when an observer falsely identified a non-fire target as a fire. Correct rejections were quite common (30 events), likely due to the relatively large number of environmental lights in the test area. The results demonstrate that small fires can be detected and reliably discriminated using NVDs at night from distances compatible with typical daytime aerial detection patrols. The trials provide guidance on altitude and spacing requirements for detection patrols and for cues to discriminate environmental light sources from fires. Analysis of detection performance in ongoing field experiments will help to evaluate the utility of and determine best practices for NVD-aided detection of wildland fires.}, address = {Toronto, Canada}, author = {Andriychuk, T. and Tomkins, L. and Zacher, J. E. and Ballagh, M. and McAlpine, R. and Milner, A. and Allison, R.S.}, date-added = {2011-05-11 11:30:48 -0400}, date-modified = {2011-05-18 16:13:00 -0400}, institution = {York University}, keywords = {Night Vision}, number = {Technical Report CSE-2010-09}, title = {Report on the Pembroke NVD-Aided Aerial Forest Fire Detection Trials}, url-1 = {http://www.cse.yorku.ca/techreports/2010/?abstract=CSE-2010-09}, year = {2010}}
@article{allison20092653-65, abstract = {There is a long history of research into depth percepts from very large disparities, beyond the fusion limit. Such diplopic stimuli have repeatedly been shown to provide reliable depth percepts. A number of researchers have pointed to differences between the processing of small and large disparities, arguing that they are subserved by distinct neural mechanisms. Other studies have pointed to a dichotomy between the processing of 1st- and 2nd-order stimuli. Here we review literature on the full range of disparity processing to determine how well different proposed dichotomies map onto one another, and to identify unresolved issues.}, author = {Wilcox, L. M. and Allison, R.S.}, date-modified = {2011-05-10 11:11:27 -0400}, doi = {10.1016/j.visres.2009.06.004}, journal = {Vision Research}, keywords = {Stereopsis}, number = {22}, pages = {2653-65}, title = {Coarse-fine dichotomies in human stereopsis}, volume = {49}, year = {2009}, url-1 = {https://doi.org/10.1016/j.visres.2009.06.004}}
@article{allison20091-8, abstract = {The stereoscopic system tolerates some vertical misalignment of the images in the two eyes. However, the reported tolerance for an isolated line stimulus (~4 degrees) is greater than for a random-dot stereogram (RDS, ~45 arcmin). We hypothesized that the greater tolerance can be attributed to monoptic depth signals (E. Hering, 1861; M. Kaye, 1978; L. M. Wilcox, J. M. Harris, & S. P. McKee, 2007). We manipulated the vertical misalignment of a pair of isolated stereoscopic dots to assess the contribution of each depth signal separately. For the monoptic stimuli, where only one half-image was present, equivalent horizontal and vertical offsets were imposed instead of disparity. Judgments of apparent depth were well above chance, though there was no conventional disparity signal. For the stereoscopic stimuli, one element was positioned at the midline where monoptic depth perception falls to chance but conventional disparity remains. Subjects lost the depth percept at a vertical misalignment of between 44 and 88 arcmin, which is much smaller than the limit found when both signals were provided. This tolerance for isolated stimuli is comparable to the reported tolerance for RDS. We conclude that previous reports of the greater tolerance to vertical misalignment for isolated stimuli arose from the use of monoptic depth signals.}, author = {Fukuda, K. and Wilcox, L.M. and Allison, R.S. and Howard, I.P.}, date-modified = {2012-07-02 17:31:17 -0400}, doi = {10.1167/9.2.1}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {2}, pages = {1-8}, title = {A reevaluation of the tolerance to vertical misalignment in stereopsis}, volume = {9}, year = {2009}, url-1 = {https://doi.org/10.1167/9.2.1}}
@article{allison20091-14, abstract = {The benefits of binocular vision have been debated throughout the history of vision science yet few studies have considered its contribution beyond a viewing distance of a few meters. In the first set of experiments, we compared monocular and binocular performance on depth interval estimation and discrimination tasks at 4.5, 9.0 or 18.0 m. Under monocular conditions, perceived depth was significantly compressed. Binocular depth estimates were much nearer to veridical although also compressed. Regression-based precision measures were much more precise for binocular compared to monocular conditions (ratios between 2.1 and 48). We confirm that stereopsis supports reliable depth discriminations beyond typical laboratory distances. Furthermore, binocular vision can significantly improve both the accuracy and precision of depth estimation to at least 18 m. In another experiment, we used a novel paradigm that allowed the presentation of real binocular disparity stimuli in the presence of rich environmental cues to distance but not interstimulus depth. We found that the presence of environmental cues to distance greatly enhanced stereoscopic depth constancy at distances of 4.5 and 9.0 m. We conclude that stereopsis is an effective cue for depth discrimination and estimation for distances beyond those traditionally assumed. In normal environments, distance information from other sources such as perspective can be effective in scaling depth from disparity.}, author = {Allison, R.S. and Gillam, B. J. and Vecellio, E.}, date-modified = {2012-07-02 17:41:57 -0400}, doi = {10.1167/9.1.10}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {1 Article 10}, pages = {1-14}, title = {Binocular depth discrimination and estimation beyond interaction space}, volume = {9}, year = {2009}, url-1 = {https://doi.org/10.1167/9.1.10}}
@article{allison20091-11, abstract = {Safe and effective locomotion depends critically on judgements of the surface properties of the ground to be traversed. Little is known about the role of binocular vision in surface perception at distances relevant to visually guided locomotion in humans. Programmable arrays of illuminated targets were used to present sparsely textured surfaces with real depth at distances of 4.5 and 9.0 m. Psychophysical measurements of discrimination thresholds demonstrated a clear superiority for stereoscopic over monocular judgments of relative and absolute surface slant. Judgements of surface roughness in particular demonstrated a substantial binocular advantage. Binocular vision is thus shown to directly contribute to judgements of the layout of terrain up to at least 4.5 m, and its smoothness to at least 9.0 m. Hence binocular vision could support moment-to-moment wayfinding and path planning, especially when monocular cues are weak.}, author = {Allison, R.S. and Gillam, B. J. and Palmisano, S. A.}, date-modified = {2012-07-02 17:37:07 -0400}, doi = {10.1167/9.12.8}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {12 Article 8}, pages = {1-11}, title = {Stereoscopic discrimination of the layout of ground surfaces}, volume = {9}, year = {2009}, url-1 = {https://doi.org/10.1167/9.12.8}}
@inbook{Wilcox:2009mn, address = {Thousand Oaks, CA}, author = {Wilcox, L.M. and Allison, R.S.}, booktitle = {Encyclopedia of Perception}, date-added = {2011-05-06 11:18:20 -0400}, date-modified = {2014-07-19 21:28:36 +0000}, editor = {E. Bruce Goldstein}, keywords = {Stereopsis}, pages = {208-212}, publisher = {Sage Publications Inc}, title = {Binocular Vision and Stereopsis}, year = {2009}}
@incollection{Govan:2009vn, author = {Govan, D. and Gillam, B. and Palmisano, S. A. and Allison, R. S. and Harris, J. M.}, booktitle = {36th Australasian Experimental Psychology Conference}, date-added = {2012-08-13 19:51:22 +0000}, date-modified = {2012-08-13 20:01:21 +0000}, keywords = {Stereopsis}, month = {April 17-19, 2009}, title = {Binocular depth interval estimation beyond interaction space}, year = {2009}}
@incollection{Gillam:2009wk, address = {Regensburg, Germany}, author = {Gillam, B. and Palmisano, S. A. and Govan, D. and Allison, R.S. and Harris, J.}, booktitle = {32nd European Conference on Visual Perception}, date-added = {2011-05-09 13:25:12 -0400}, date-modified = {2014-09-09 19:05:06 +0000}, keywords = {Stereopsis}, month = {08}, number = {Suppl}, series = {59}, title = {Stereoscopic depth magnitudes at greater distances in an old steam railway tunnel}, url-1 = {http://www.perceptionweb.com/abstract.cgi?id=v091008}, volume = {38}, year = {2009}}
@incollection{Smith:2009dd, author = {Smith, C.E. and Wilcox, L.M. and Allison, R.S. and Karanovic, O. and Wilkinson, F.}, booktitle = {CVR 2009: Vision in 3D Environments, BI-15}, date-added = {2011-05-09 11:10:36 -0400}, date-modified = {2011-05-18 15:48:57 -0400}, keywords = {Depth perception}, title = {Effect of Differential Interocular Blur on Depth Perception From Fine and Coarse Disparities}, year = {2009}}
@incollection{Fukuda:2009nb, author = {Fukuda, K and Wilcox, L.M. and Allison, R.S. and Howard, I.P.}, booktitle = {CVR 2009: Vision in 3D Environments, BI-4}, date-added = {2011-05-09 11:09:19 -0400}, date-modified = {2011-05-18 16:08:38 -0400}, keywords = {Stereopsis}, title = {On the contribution of monoptic depth and binocular disparity to depth from diplopic images}, year = {2009}}
@incollection{Tsirlin:2009ya, author = {Tsirlin, I. and Wilcox, L.M. and Allison, R.S.}, booktitle = {CVR 2009: Vision in 3D Environments, MO-15}, date-added = {2011-05-09 11:03:59 -0400}, date-modified = {2011-05-18 16:21:27 -0400}, keywords = {Stereopsis}, title = {The role of monocular occlusion in the construction of three-dimensional surfaces}, year = {2009}}
@incollection{Teather:2009ug, author = {Teather, R. and Allison, R.S. and Stuerzlinger, W.}, booktitle = {CVR 2009: Vision in 3D Environments, MU-3}, date-added = {2011-05-09 11:01:55 -0400}, date-modified = {2011-05-18 15:54:50 -0400}, keywords = {Augmented & Virtual Reality}, title = {Evaluating Visual/Motor Coupling in Fish-Tank VR}, url-1 = {http://www.cse.yorku.ca/~wolfgang/papers/colocateddisjoint.pdf}, year = {2009}}
@incollection{Guterman:2009er, author = {Guterman, P. and Allison, R.S. and Jennings, S. and Craig, G. and Parush, A. and Gauthier, M. and Macuda, T.}, booktitle = {CVR 2009: Vision in 3D Environments, TI-8}, date-added = {2011-05-09 10:58:01 -0400}, date-modified = {2011-05-18 16:20:29 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {The impact of a limited field of view on active search and spatial memory}, year = {2009}}
@incollection{Herriot:2009qz, abstract = {Introduction: Convergence insufficiency is a condition in which the eyes are unable to make coordinated convergence movements to near objects. It is a common condition with a prevalence as high as 17.6\% reported in clinical settings (Rouse et al., 2008). Patients with symptoms of headaches and diplopia are often prescribed eye exercises to train their oculomotor coordination; however traditional forms of the exercises are often tedious, leading to poor patient compliance (Gallaway et al., 2002). The purpose of this study was to investigate the efficacy and user acceptance of game-based vision training in comparison to traditional methods of vision training for treatment of convergence insufficiency. Methods: Twelve participants with convergence insufficiency and six without were asked to play a three-dimensional version of Pac-Man using a stereoscope to fuse two separate images. As a participant improved, the convergence demand increased and operant conditioning paradigms were used to keep the participant motivated. Three other participants with convergence insufficiency were asked to perform fusional vergence training with vectograms and pencil push-ups. Training lasted two weeks, with measurements of binocularity taken at the initial, 1-week, and final appointments. Participants completed a visual symptom questionnaire prior to their training and both a visual symptom questionnaire and an acceptance questionnaire after completion. Results: Both groups with convergence insufficiency had similar improvement in near point of convergence, positive fusional vergence, and reports of eye strain; however, participants in the game-based vision training group were more likely to rate the training as fun and motivating than participants assigned to traditional vision training. The group without convergence insufficiency showed little improvement in near point of convergence and positive fusional vergence but also reported that game-based vision training was fun and motivating. Conclusion: Computer gaming-based vision therapy is more stimulating than traditional methods of vision training. We expect this will translate into greater compliance and improved outcome for patients with convergence insufficiency.}, author = {Herriot, C.G. and Irving, E.L. and Carvelho, T. and Allison, R.S.}, booktitle = {Association for Research in Vision and Ophthalmology (ARVO) Annual Meeting}, date-added = {2011-05-06 15:51:30 -0400}, date-modified = {2014-02-03 14:43:29 +0000}, keywords = {Gaming for Vision Therapy}, month = {May 3rd-7th}, organization = {Association for Research in Vision and Ophthalmology}, title = {Efficacy and User Acceptance of Computer Gaming Paradigms for Vision Training}, url-1 = {http://abstracts.iovs.org//cgi/content/abstract/50/5/3827?sid=dcb140e7-233c-4631-8dd7-b4638b854af6}, volume = {50}, year = {2009}}
@incollection{Smith:2009uz, abstract = {Purpose: Traditionally presbyopia is treated using corrective bifocal or multifocal lenses. An alternative is to correct one eye for near and the other for distance with a method known as ``monovision''. It is known that differential interocular blur can degrade stereoacuity, and recent studies have confirmed that monovision treatment increases stereoacuity thresholds. However, stereoacuity tests do not assess disparity sensitivity in the coarse range. Given the proven link between stereopsis and stability, we have measured the short-term effects of induced monovision on stereopsis over a broad range of fine (fused) and coarse (diplopic) disparities at both near and far viewing distances. Methods: Stimuli were presented dichoptically using a time-sequential polarized stereoscopic display. During each trial a line was presented for 300 ms with either crossed or uncrossed disparity above a zero disparity fixation cross. Participants indicated the direction of the depth offset. In one session baseline performance was assessed with optimal optical correction. In another, monovision was induced by adding -1D and +1D lenses in front of the dominant and non-dominant eyes respectively. We assessed performance at distances of 62 and 300 cm in counterbalanced blocks. Within each block, the stimuli were presented at 5 fine disparities ranging from 60 to 2400 arcsec and 5 coarse disparities ranging from 1° to 3.5°. Results: Induced monovision resulted in decreased accuracy relative to baseline in the fine disparity range, but effects were minimal in the coarse range. Monovision also had a larger impact on performance at a viewing distance of 300 cm than at 62 cm. Conclusions: Induced monovision not only increases stereoacuity thresholds, but degrades depth discrimination across the range of fusable disparities in young observers. This effect on fine disparity is accentuated at larger viewing distances typical of fixation distances during walking, suggesting that stability during locomotion may be degraded. However, we also found that coarse stereopsis was relatively spared, and this may offset the observed losses.}, author = {Smith, C.E. and Wilcox, L.M. and Allison, R.S. and Karanovic, O. and Wilkinson, F.}, booktitle = {Association for Research in Vision and Ophthalmology (ARVO) Annual Meeting}, date-added = {2011-05-06 15:12:38 -0400}, date-modified = {2014-02-03 14:44:57 +0000}, keywords = {Stereopsis}, month = {May 3rd-7th}, organization = {Association for Research in Vision and Ophthalmology}, title = {Monovision: Consequences for Depth Perception From Fine and Coarse Disparities}, url-1 = {http://abstracts.iovs.org//cgi/content/abstract/50/5/2887?sid=be049216-08e0-4c83-834b-c1c973f7dca9}, volume = {50}, year = {2009}}
@incollection{Ash:2009km, abstract = {We examined the effects of starting altitude, scene lighting and runway length on glideslope control and touchdown during simulated flight. Glideslope misperception is common during aircraft landings, especially when visibility is reduced. It is therefore important to measure the glideslope control errors generated by such misperceptions and determine whether they can be adequately compensated for. Fixed-wing aircraft landings were simulated under day or night lighting conditions, with pilots starting their final approach either ``too high'', ``too low'' or already on the desired 3 degree glideslope. Eleven private and six student pilots actively controlled these simulated landings until they touched down on one of two runways (either 30 m x 1331 m or 30 m x 1819 m). Both student and private pilots were poor at compensating for approaches that started ``too high'' or ``too low'', particularly at night. However, they were able to adjust for these glideslope control errors prior to touchdown via appropriate execution of the landing flare. While private pilots were no more accurate than students during the glideslope control phase, they typically executed the safest and smoothest landings. Application: This study suggests that flight simulation could be useful in training student pilots to carry out safe landings via the appropriate execution of the landing flare.}, address = {Melbourne, Australia}, author = {Ash, A. and Palmisano, S. and Kim, J. and Allison, R.}, booktitle = {Combined Abstracts of 2009 Australian Psychology Conferences: The abstracts of the 36th Australasian Experimental Psychology Conference}, date-added = {2011-05-06 15:05:24 -0400}, date-modified = {2011-05-22 13:36:45 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, organization = {The Australian Psychological Society}, pages = {3-4}, title = {Effects of starting height, lighting and runway length on glideslope control and landing quality}, url-1 = {https://misprd.uow.edu.au/ris_public/WebObjects/RISPublic.woa/wo/2.0.12.1.13.3.3.1;jsessionid=7AACA9D9670B94B7BD8D879386367DF0}, year = {2009}}
@incollection{Palmisano:2009bl, address = {Melbourne}, author = {Palmisano, S. A. and Kim, J. and Ash, A. and Allison, R.}, booktitle = {Combined abstracts of 2009 Australian psychology conferences: The abstracts of the 36th Australasian Experimental Psychology Conference}, date-added = {2011-05-06 15:00:44 -0400}, date-modified = {2011-05-18 16:32:02 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, organization = {Australian Psychological Society}, pages = {43}, title = {Where do pilots look when they land?}, url-1 = {https://misprd.uow.edu.au/ris_public/WebObjects/RISPublic.woa/wo/2.0.12.1.13.1.3.1;jsessionid=7AACA9D9670B94B7BD8D879386367DF0}, year = {2009}}
@incollection{Allison:2009ay, address = {Toronto, Canada}, author = {Allison, R.S.}, booktitle = {American Psychological Association 117th Annual Convention}, date-added = {2011-05-06 14:58:04 -0400}, date-modified = {2011-05-11 13:09:39 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {August 6th-9th}, title = {Natural and Enhanced Visual Perception During Flight}, year = {2009}}
@incollection{allison2009286-286, abstract = {We have previously described a perceptual asymmetry that occurs when viewing pseudo-transparent random element stereograms. That is, the minimum separation in depth needed to segregate two overlaid surfaces in a random-element stereogram depends on the distribution of elements across the surfaces. With the total element density fixed, significantly larger inter-plane disparities are required for perceptual segregation of overlaid surfaces when the front surface has fewer elements than the back surface than vice versa. In the experiments described here we test the hypothesis that this perceptual asymmetry reflects a fundamental difference in signal strength for the front and back surfaces which results from disparity interpolation. That is, we propose that the blank regions between elements are assigned to the back plane, making it appear opaque. We tested this hypothesis in a series of experiments and find that: i) the total element density in the stimulus does not affect the asymmetry; ii) the perceived relative density of the two surfaces shows a similar asymmetry; and iii) manipulations favouring perceptual assignment of the spaces into surfaces other than the two overlaid element surfaces reduce the asymmetry. We propose that the interpolation of the spaces between the elements defining the surfaces is mediated by a network of inter-neural connections: excitatory within-disparity and inhibitory across-disparity. Our data suggest that the strength of the inhibitory connections is modulated according to mid-level figure-ground assignment. We are using our psychophysical results to inform the development of a computational model of this network.}, author = {Wilcox, Laurie M. and Tsirlin, Inna and Allison, Robert S.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 19:02:52 -0400}, doi = {10.1167/9.8.286}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {8}, pages = {286-286}, title = {Perceptual asymmetry in stereo-transparency: The role of disparity interpolation}, url-1 = {https://doi.org/10.1167/9.8.286}, volume = {9}, year = {2009}}
@incollection{allison2009277-277, abstract = {It is well established that monocular regions arising from occlusion of one object by another contribute to stereoscopic depth perception. However, the exact role of monocular occlusions in 3D scene perception remains unclear. One possibility is that monocular occlusions define object boundaries or discontinuities in depth. This is an attractive possibility, but to date it has not been tested empirically. Here we describe a series of experiments that directly test this hypothesis. Our novel stereoscopic stimulus consists of a foreground rectangular region set against a random-dot background positioned at zero disparity. One side of the foreground region is filled with a random-dot texture shifted towards the observer in apparent depth. The remaining area of the foreground is blank and carries no disparity information. In several experiments, we vary the presence or absence and the width of occluded areas at the border of the central blank area and the background texture. Our data show that the presence of occluded elements on the boundary of the blank area dramatically influences the perceived shape of the foreground region. If there are no occluded elements, the foreground appears to contain a depth step, as the blank area lies at the depth of the zero disparity border. When occluded elements are added, the blank region is seen vividly at the same depth as the texture, so that the foreground is perceived as a single opaque planar surface. We show that the depth perceived via occlusion is not due to the presence of binocular disparity at the boundary, and that it is qualitative, not quantitative in nature. Taken together, our experiments provide strong support for the hypothesis that monocular occlusion zones are important signals for the presence and location of depth discontinuities.}, author = {Tsirlin, Inna and Wilcox, Laurie and Allison, Robert}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 19:02:04 -0400}, doi = {10.1167/9.8.277}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {8}, pages = {277-277}, title = {Identifying discontinuities in depth: A role for monocular occlusions}, url-1 = {https://doi.org/10.1167/9.8.277}, volume = {9}, year = {2009}}
@incollection{allison20091137-1137, abstract = {Many optical devices limit the amount of the visual field that can be seen at any one time. Here we examine how these limits on Field of View (FoV) impair the ability to integrate visual information and make navigational decisions. Participants wore field-restricting goggles with separate groups fitted with either a $40^{\circ}$ or $90^{\circ}$ horizontal FoV. Subjects actively explored a maze-like environment over the course of 12 search trials. For each search trial, subjects were given a specific target and asked to find it as quickly as possible. The time and path walked to the targets were recorded on paper. Between each trial subjects were blindfolded and led to a new location in the environment. After the search trials, they completed a set of spatial memory tasks that included sketching a map of the search area, and judging the relative direction of and distances between objects. Search performance was measured by average walking speed, which was determined by dividing the path length by the search time for each trial. Participants with the narrower FoV walked significantly slower to the targets, but they increased their speed over time. Independent raters, who judged the sketch maps on layout, scale, and geometry, showed a significant preference for the maps of the wide FoV group over the narrow FoV group. However, there was no effect of FoV for the relative direction and distance estimation task, indicating a limited impact on the memory of locations of objects in the environment. In contrast, the results suggest that FoV restriction has a significant impact on the spatial representation of the layout of one's environment that needs to be considered in the design and use of devices that augment or enhance vision.}, author = {Guterman, Pearl S. and Allison, Robert S. and Jennings, Sion and Craig, Greg and Parush, Avi and Gauthier, Michelle and Macuda, Todd}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:52:04 -0400}, doi = {10.1167/9.8.1137}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {8}, pages = {1137-1137}, title = {The outer limits: How limiting the field of view impacts navigation and spatial memory}, url-1 = {https://doi.org/10.1167/9.8.1137}, volume = {9}, year = {2009}}
@incollection{allison2009631-631, abstract = {It is known that modulations of absolute binocular disparity of a textured surface do not create a sensation of motion in depth (MID) when the image does not change size (loom). We reported previously that modulations of disparity do create some MID in a surface containing a radial pattern that lacks a looming signal when it moves in depth. We have built an instrument that allows us to independently control looming, changing absolute disparity (vergence), and changing relative disparity of surfaces actually moving in depth. A textured surface and a surface with a radial pattern moved back and forth in depth between 40 cm and 70 cm. With monocular viewing, looming created MID of the textured display but not of the radial display. Modulation of absolute disparity (vergence) produced no MID of the textured display but some MID of the radial display. When modulation of absolute disparity was increased relative to looming, MID was increased for both displays. When disparity modulation and looming were in conflict, MID decreased for both stimuli. These results indicate cue summation. Superimposition of a stationary reference stimulus that provided changing relative disparity generally increased MID for both stimuli. Addition of the reference stimulus to the radial display with reversed vergence produced MID in accordance with the vergence signal. Addition of the reference stimulus to the patterned display with vergence reversed relative to looming produced a paradoxical effect. The textured display appeared to move simultaneously in opposite directions. When it appeared to move forward relative to the observer, it appeared to move backward relative to the stationary reference stimulus. This indicates strong cue dissociation. We will demonstrate this unique paradoxical effect.}, author = {Fukuda, Kazuho and Howard, Ian P. and Allison, Robert S.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:51:29 -0400}, doi = {10.1167/9.8.631}, journal = {Journal of Vision}, keywords = {Motion in depth}, number = {8}, pages = {631-631}, title = {Contributions of vergence, looming, and relative disparity to the perception of motion in depth}, url-1 = {https://doi.org/10.1167/9.8.631}, volume = {9}, year = {2009}}
@inproceedings{Murray:2009jf, address = {Lindfield, Australia}, author = {Murray, R. and Allison, R.S. and Palmisano, S. A.}, booktitle = {SimTect 2009 Conference Proceedings}, date-added = {2011-05-06 13:08:13 -0400}, date-modified = {2011-05-18 15:57:52 -0400}, editor = {E. Leigh}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {87-91}, read = {0}, title = {Glideslope perception during aircraft landing}, url-1 = {https://percept.eecs.yorku.ca/papers/murray 2009 simtec.pdf}, year = {2009}}
@inproceedings{Guterman:2009ov, address = {Galway, Ireland}, author = {Guterman, P. and Allison, R.S. and McCague, H.}, booktitle = {Proceedings of the 25th Annual Meeting of the International Society for Psychophysics}, date-added = {2011-05-06 12:45:21 -0400}, date-modified = {2011-05-18 16:17:55 -0400}, keywords = {Misc.}, month = {October 21st-24th}, title = {The application of circular statistics to psychophysical research}, url-1 = {http://yorku.academia.edu/PearlGuterman/Papers/159955/The_application_of_circular_statistics_to_psychophysical_research}, year = {2009}}
@inproceedings{allison2009624-629, address = {Toronto, Canada}, author = {Teather, R. and Allison, R.S. and Stuerzlinger, W.}, booktitle = {{IEEE} Toronto International Conference - Science and Technology for Humanity}, date-modified = {2011-05-11 13:23:56 -0400}, doi = {10.1109/TIC-STH.2009.5444423}, keywords = {Augmented & Virtual Reality}, pages = {624-629}, publisher = {IEEE}, title = {Comparing Coupled and Decoupled Input/Display Spaces in Fish-Tank VR}, url-1 = {https://doi.org/10.1109/TIC-STH.2009.5444423}, year = {2009}}
@inproceedings{allison2009521-526, abstract = {Aerial search for targets on the ground is a challenging task and success depends on providing proper intelligence to the searchers. Recent advances in avionics enhanced and synthetic vision systems (ESVS) offer new opportunities to present this information to aircrew. This paper describes the concept and implementation of a new ESVS technique intended to support flight crews in aerial search for search and rescue missions and other guided search scenarios. Most enhanced vision systems for aviation have targeted the pilot in order to support flight and navigation tasks. The Probability Grid Mapping system (PGM) is unique in that it aims to improve the effectiveness of the other officer in the aircraft who is managing and performing the tactical mission. The PGM provides the searcher with an augmented, conformal, digital moving map of the search area that encodes the estimated probability of the target being found in various locations. A priori estimation of these probabilities allows for prioritization of search areas, reduces search duplication, improves coverage and, ideally, maximizes search effectiveness. The conformal 3D map is displayed with appropriate perspective projection using a head-slaved optical see-through head-mounted display allowing it to be registered with and augment the real world. To evaluate the system prior to flight test, a simulation environment was developed for study of the effectiveness of highlighting methods, update strategies, and probability mapping methods.}, address = {New York}, author = {Shabaneh, M. and Merei, A. and Jennings, S. and Allison, R.S.}, booktitle = {IEEE TIC-STH 09: 2009 IEEE Toronto International Conference: Science and Technology for Humanity}, date-modified = {2011-09-12 22:44:49 -0400}, doi = {10.1109/TIC-STH.2009.5444443}, keywords = {Augmented & Virtual Reality}, pages = {521-526}, publisher = {IEEE}, title = {Probability Grid Mapping System for Aerial Search}, url-1 = {https://doi.org/10.1109/TIC-STH.2009.5444443}, year = {2009}}
@techreport{Shabaneh:2009dt, address = {Toronto, Canada}, author = {Shabaneh, M. and Guterman, P. and Zacher, J. and Sakano, Y. and Allison, R.S.}, date-added = {2019-02-03 10:26:47 -0500}, date-modified = {2019-02-03 10:26:47 -0500}, institution = {York University}, keywords = {Night Vision}, title = {Report on Selected Issues Related to NVG Use in a Canadian Security Context, Technical Report CSE-2009-07}, url-1 = {https://percept.eecs.yorku.ca/papers/Report on Selected Issues Related to NVG Use in a Canadian Security Context.pdf}, year = {2009}}
@article{allison20081150-64, abstract = {The authors examined observers steering through a series of obstacles to determine the role of active gaze in shaping locomotor trajectories. Participants sat on a bicycle trainer integrated with a large field-of-view simulator and steered through a series of slalom gates. Steering behavior was determined by examining the passing distance through gates and the smoothness of trajectory. Gaze monitoring revealed which slalom targets were fixated and for how long. Participants tended to track the most immediate gate until it was about 1.5 s away, at which point gaze switched to the next slalom gate. To probe this gaze pattern, the authors then introduced a number of experimental conditions that placed spatial or temporal constraints on where participants could look and when. These manipulations resulted in systematic steering errors when observers were forced to use unnatural looking patterns, but errors were reduced when peripheral monitoring of obstacles was allowed. A steering model based on active gaze sampling is proposed, informed by the experimental conditions and consistent with observations in free-gaze experiments and with recommendations from real-world high-speed steering.}, author = {Wilkie, R. M. and Wann, J. P. and Allison, R.S.}, date-modified = {2011-05-11 13:10:57 -0400}, doi = {10.1037/0096-1523.34.5.1150}, journal = {J Exp Psychol Hum Percept Perform}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {5}, pages = {1150-64}, title = {Active gaze, visual look-ahead, and locomotor control}, url-1 = {https://doi.org/10.1037/0096-1523.34.5.1150}, volume = {34}, year = {2008}}
@article{allison20081-10, abstract = {Stereo-transparency is an intriguing, but not well-understood, phenomenon. In the present experiment, we simultaneously manipulated the number of overlaid planes, density of elements, and depth separation between the planes in random dot stereograms to evaluate the constraints on stereoscopic transparency. We used a novel task involving identification of patterned planes among the planes constituting the stimulus. Our data show that observers are capable of segregating up to six simultaneous overlaid surfaces. Increases in element density or number of planes have a detrimental effect on the transparency percept. The effect of increasing the inter-plane disparity is strongly influenced by other stimulus parameters. This latter result can explain a difference in the literature concerning the role of inter-plane disparity in perception of stereo-transparency. We argue that the effects of stimulus parameters on the transparency percept can be accounted for not only by inhibitory interactions, as has been suggested, but also by the inherent properties of disparity detectors.}, author = {Tsirlin, I. and Allison, R.S. and Wilcox, L. M.}, date-modified = {2012-07-02 19:00:44 -0400}, doi = {10.1167/8.5.5}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {5 Article 5}, pages = {1-10}, title = {Stereoscopic transparency: constraints on the perception of multiple surfaces}, url-1 = {https://doi.org/10.1167/8.5.5}, volume = {8}, year = {2008}}
@article{allison200822-33, abstract = {We examined the vection in depth induced when simulated random self-accelerations (jitter) and periodic self-accelerations (oscillation) were added to radial expanding optic flow (simulating constant-velocity forward self-motion). Contrary to the predictions of sensory-conflict theory, frontal-plane jitter and oscillation were both found to significantly decrease the onsets and increase the speeds of vection in depth. Depth jitter and oscillation had lesser, but still significant, effects on the speed of vection in depth. A control experiment demonstrated that adding global perspective motion which simulated a constant-velocity frontal-plane self-motion had no significant effect on vection in depth induced by the radial component of the optic flow. These results are incompatible with the notion that constant-velocity displays produce optimal vection. Rather, they indicate that displays simulating self-acceleration can often produce more compelling experiences of self-motion in depth.}, author = {Palmisano, S. and Allison, R.S. and Pekin, F.}, date-modified = {2011-05-11 13:15:50 -0400}, journal = {Perception}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {1}, pages = {22-33}, title = {Accelerating self-motion displays produce more compelling vection in depth}, url-1 = {https://percept.eecs.yorku.ca/papers/Palmisano,Allison&Pekin.pdf}, volume = {37}, year = {2008}}
@incollection{Sadr:2008um, abstract = {Saccadic eye movements are rapid shifts of gaze that direct the fovea from one point of interest to another. On each saccade, the entire scene streams across the retina at hundreds of degrees per second. However, this streaming is not apparent, due to a reduced visual sensitivity toward motion during saccades. We have observed that when scenes translate transsaccadically (during saccades) they are perceived as moving slower than equivalent-sized intersaccadic transitions. We confirmed these findings using a magnitude estimation technique (Sadr, Allison & Vinnikov, ECVP 2007). We further explored the dependence of transsaccadic motion perception on the direction of shift in a 4AFC experiment. We examined the effect of different scene transitions relative to saccade directions both horizontally and vertically, and subjects had to indicate the direction of the scene transitions if detected. Subjects sequentially fixated blinking fixation points ($20^{\circ}$ apart) indicated on each image based on horizontal or vertical saccade direction conditions. We conclude that during saccades, the magnitude of the velocity signal is attenuated as well as its detectability. Furthermore, the extent of saccadic suppression depends on the relative saccade direction and the direction of scene transition.}, author = {Sadr, S. and Allison, R.S. and Vinnikov, M. and Swierad, D.}, booktitle = {Vision Sciences Society Annual Conference, Journal of Vision}, date-added = {2011-05-06 16:15:13 -0400}, date-modified = {2012-07-02 18:04:45 -0400}, doi = {10.1167/8.6.933}, keywords = {Eye Movements & Tracking}, number = {6}, organization = {Vision Sciences Society}, pages = {933, 933a}, title = {Influence of relative saccade direction on detection of transsaccadic natural scene transitions}, url-1 = {https://doi.org/10.1167/8.6.933}, volume = {8}, year = {2008}}
@incollection{Suryakumar:2008er, abstract = {Purpose: Recently, it has been shown that a transient pupil constriction occurs following presentation of a random-dot stereogram with uncrossed disparity (Li, Z and Sun, F. Exp Br Res, 2006, 168:436). We investigated the dynamic characteristics of such pupil responses and whether they were coupled with changes in ocular focus. Methods: Four subjects (mean age=$26.8\pm 3.6$yrs) participated in the study. Stereo half images were displayed on a pair of computer monitors placed at a distance of 60 cm in a Wheatstone stereoscope arrangement. Subjects fixated the center of the random-dot stereogram which alternated between depicting a flat plane and a 0.5 cpd, 30 arc-minute peak disparity, sinusoidal corrugation in depth. In all cases, fixation remained constant at the 60 cm screen distance. Accommodation and pupil responses were measured monocularly using a custom-built, high-speed photorefractor at 100 Hz and analyzed offline. The onset and end of the accommodation and pupil responses were identified to estimate amplitude. The pupil responses were then differentiated to estimate peak velocity. Results: A transient pupil constriction and positive accommodation were observed during both uncrossed and crossed disparity presentations (Uncrossed: $0.26\pm 0.12$mm, $0.20\pm 0.06$D; Crossed: $0.41\pm 0.40$mm, $0.31\pm 0.2$D). The peak velocity of pupil responses changed significantly as a function of amplitude (y=1.12x-0.38, $R^2=0.34$, $p<0.05$) and initial pupil diameter (y=0.28x-2.41, $R^2=0.64$, $p<0.05$). Changes in pupil size were associated with changes in accommodation. However, the ratio of pupil change to accommodation was not significantly different between crossed and uncrossed disparity (Uncrossed: $1.55\pm 0.69$mm/D; Crossed: $1.21\pm 0.51$mm/D; $p>0.05$). Conclusions: While fixation was maintained at the plane of the screen, the finding that pupil and accommodation changes have the same sign regardless of the sign of disparity suggests the response was driven by the apparent depth in the stimulus rather than its physical distance. The strength of the coupling between accommodation and pupil responses appears to be similar for crossed and uncrossed disparity. The amplitude and velocity of pupil responses depend on initial (starting) pupil diameter confirming the non-linearity in the operating range of the pupil. Keywords: accommodation * pupil * perception}, author = {Suryakumar, R. and Allison, R.S.}, booktitle = {Association for Research in Vision and Ophthalmology (ARVO) Annual Meeting}, date-added = {2011-05-06 16:09:53 -0400}, date-modified = {2016-01-03 03:28:10 +0000}, keywords = {Stereopsis}, month = {April 27th - May 1st}, organization = {Association for Research in Vision and Ophthalmology}, title = {Accommodation and Pupil Responses to Random-dot Stereograms}, url-1 = {https://percept.eecs.yorku.ca/papers/raj arvo2008 abstract.pdf}, url-2 = {http://abstracts.iovs.org/cgi/content/abstract/49/5/1792}, volume = {49}, year = {2008}}
@incollection{Palmisano:2008tl, address = {Brisbane, Australia}, author = {Palmisano, S. and Favell, S. and Satchler, B. and Allison, R.}, booktitle = {Asia-Pacific Conference on Vision}, date-added = {2011-05-06 16:02:11 -0400}, date-modified = {2011-05-18 16:04:50 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {07}, title = {Landing visually with flare: Pilots do it better}, year = {2008}}
@incollection{Allison:2008vy, abstract = {Successful adaptation to the microgravity environment of space and re-adaptation to gravity on earth require recalibration of visual and vestibular signals. Despite decades of experimentation, motion sickness, spatial disorientation, reorientation illusions and degraded visuomotor performance continue to impact the availability and effectiveness of astronauts. We have found that incorporating jitter of the vantage point into visual displays produces more compelling illusions of self-motion (vection), despite generating greater sensory conflicts. We will discuss a series of ground-based experiments that examine a range of possible explanations for this phenomenon. Recent neuroimaging and neurophysiological data suggest that accelerating optic flow stimuli, such as the jittering optic flow used in our research, may result in suppression of signals in vestibular cortex. Such visual modulation of vestibular signals is potentially important to understanding the initial response and adaptation to microgravity. Currently it is unclear what role gravity plays in the potentiation of vection with jittering optic flow. Ground- and space-based experiments will provide a unique opportunity to explore the jitter effect during periods of adaptation to altered gravity and to complement other research looking at vection on ISS. Our goals are to understand the role of gravity in jitter-enhanced vection, to develop the theory of how vestibular and visual signals are recalibrated in altered gravity and to study the time course of this adaptation. Keywords: visual, smooth, perturbed, self, motion, perception.}, address = {St. Hubert, Quebec}, author = {Allison, R.S. and Zacher, J.E. and Palmisano, S.A.}, booktitle = {CSA Life and Physical Science Workshop 2008: Scientific Advancement and Planning for Future Missions, Canadian Space Agency, Life and Physical Science Directorate}, date-added = {2011-05-06 15:58:30 -0400}, date-modified = {2016-01-03 03:26:40 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {March 3rd-5th}, title = {Visual Perception of smooth and perturbed self-motion}, volume = {20}, year = {2008}}
@incollection{Gillam:2008gs, address = {Fremantle, Australia}, author = {Gillam, B. and Allison, R.S. and Palmisano, S.}, booktitle = {Australian Journal of Psychology (EPC abstracts)}, date-added = {2011-05-06 15:56:33 -0400}, date-modified = {2015-11-17 14:16:38 +0000}, doi = {10.1080/00049530802385541}, keywords = {Stereopsis}, month = {March 28th-30th}, pages = {72}, title = {Binocular depth and slant perception beyond ambient space}, url-1 = {https://doi.org/10.1080/00049530802385541}, volume = {60 (Suppl)}, year = {2008}}
@incollection{Palmisano:2008kg, abstract = {This study investigated visual touchdown point perception during simulated fixed-wing aircraft landing approaches. Experiments examined the effects of day versus night lighting, smooth versus buffeting simulated approaches, as well as a variety of other visual scene manipulations, including the presence or absence of: (i) 3-D buildings; (ii) a runway outline; (iii) a false explicit horizon; (iv) a true explicit horizon; and (v) different types of ground plane texture (random vs grid). After 4 s exposure to each simulated landing approach, participants pointed to the location of their perceived touchdown point using the computer's mouse (performance feedback was provided on some trials). While our lighting, scenery and feedback manipulations significantly altered touchdown point judgments, performance was unacceptably imprecise and biased in all of the conditions tested. The findings provide further evidence that, by themselves, optic flow based perceptions of touchdown point are not sufficient for a pilot to safely land an airplane. This research was supported by ARC Discovery grant DP0772398.}, address = {Fremantle, Australia}, author = {Palmisano, S. and Allison, R.}, booktitle = {Australian Journal of Psychology (35th Australasian experimental psychology conference, EPC abstracts)}, date-added = {2011-05-06 15:53:16 -0400}, date-modified = {2011-05-22 13:37:51 -0400}, doi = {10.1080/00049530802385541}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {March 28th-30th}, pages = {92-93}, title = {Visual Touchdown Point Perception During Simulated Aircraft Landing}, url-1 = {https://doi.org/10.1080/00049530802385541}, year = {2008}}
@incollection{allison20081086-1086, abstract = {Sensations of depth can be produced by diplopic images with horizontal disparity beyond the fusion limit (conventional stereopsis), a monocular image flanking a binocular image (Panum's limiting case), and an eccentric monocular image (monoptic depth, Kaye 1978; Wilcox et al. 2007). Conceivably, depth perception in Panum's limiting case could be explained by stereopsis (double-duty matching, Hering 1879), monoptic depth or another mechanism entirely. Our goal is to determine which of these options is valid. Subjects judged the magnitude of perceived depth of a target stimulus viewed for 67 ms relative to a prior fixation point. The target was (1) a monocular vertical line with variable horizontal offset relative to a midline monocular line seen by the other eye (stereoscopic), (2) a monocular line with variable offset relative to a midline binocular line (Panum's limiting case), and (3) a monocular line with variable offset relative to the prior fixation point (monoptic). For Panum's limiting case, apparent depth at first increased with increasing lateral offset of the monocular line. However, this occurred only for offsets of up to 15 and 45 arcmin on the temporal and nasal sides of the retina, respectively. At larger offsets, depth was similar to that perceived from monoptic targets. In contrast, perceived depth from stereopsis increased with increasing disparity of up to $1^{\circ}$ and remained constant up to a disparity of at least $2^{\circ}$ (stimuli became diplopic at 30 arcmin). The magnitude of perceived depth was much smaller in monoptic compared with stereoscopic conditions, at all offsets. The distinct properties of depth perceived with these three types of stimuli suggest that they have different physiological substrates, and that depth from Panum's limiting case is not simply due to stereoscopic matching.}, author = {Fukuda, Kazuho and Wilcox, Laurie M. and Allison, Robert S. and Howard, Ian P.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:51:44 -0400}, doi = {10.1167/8.6.1086}, journal = {Journal of Vision}, keywords = {Depth perception}, number = {6}, pages = {1086-1086}, title = {Comparison of depth percepts created by binocular disparity, Panum's limiting case, and monoptic depth}, url-1 = {https://doi.org/10.1167/8.6.1086}, volume = {8}, year = {2008}}
@incollection{allison2008646-646, abstract = {Earlier work from this laboratory established that cyclovergence is induced more effectively by vertical shear disparity than by horizontal shear disparity in a large textured surface. We predicted that vertical shear disparity confined to stimuli along the horizontal meridian would evoke more cyclovergence than stimuli confined to the periphery. That is, shear disparity in the periphery can arise from surface inclination, while disparity along the central meridian arises only from torsional misalignment of the eyes. Binocular dichoptic stimuli were rotated in counterphase through $5^{\circ}$ peak-to-peak disparity at 0.1 Hz and presented in a mirror stereoscope. The stimuli were $70^{\circ}$ long randomly spaced lines that (1) filled a $70^{\circ}$ diameter circle, (2) were confined to a horizontal band $7^{\circ}$ wide, (3) filled the $70^{\circ}$ circle but with the central horizontal band blank. We used scleral search coils to measure cyclovergence of three subjects as they fixated at the center of planar stimuli. As predicted, the mean gain of cyclovergence was significantly higher (0.23) for the central band than for the display with the central band blank (0.12). However, the gain for the full $70^{\circ}$ display was higher (0.36) than that for the central band. We conclude that stimuli along the central horizontal meridian provide a stronger stimulus for cyclovergence than do stimuli outside the central meridian. However, increasing the total area of the stimulus also increases the gain of cyclovergence. Acknowledgements: Supported by grants from the Natural Sciences and Engineering Research Council of Canada and the Canadian Institutes of Health Research.}, author = {Daniels, Nicole T. and Howard, Ian P. and Allison, Robert S.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:51:09 -0400}, doi = {10.1167/8.6.646}, journal = {Journal of Vision}, keywords = {Vergence}, number = {6}, pages = {646-646}, title = {Gain of cyclovergence as a function of stimulus location}, url-1 = {https://doi.org/10.1167/8.6.646}, volume = {8}, year = {2008}}
@incollection{allison2008536-536, abstract = {Effective locomotion depends on judgements of the support, passability and effort to traverse provided by terrain several metres away. Elementary texts commonly assert that stereopsis per se is ineffective in these judgements beyond modest distances. He et al. (Perception, 2004, 33: 789) proposed that vergence and stereopsis calibrate and anchor depth percepts in near space that are then extended to larger distances by integrating monocular cues over the continuous ground plane. However, stereopsis has a much larger theoretical range and we have shown binocular performance improvements to at least 18.0m (VSS2007). Here we evaluate the contribution of binocular vision to judgements of ground surface properties. A computer-controlled constellation of LEDs was distributed throughout a volume of space centred 4.5 or 9.0 metres from the subject. LEDs could be selectively lit to create a single ground plane or two planes either adjacent or interleaved (simulating uneven terrain). In separate 2AFC experiments subjects discriminated: 1) the absolute slant of a single plane; 2) the relative slant between two adjacent planes; or 3) whether all the lights lay in a single plane or not (surface smoothness). Viewing was binocular or monocular. Binocular discrimination of absolute and relative slant showed less bias and was more precise than monocular discrimination for all tasks at both distances. Judgements of surface smoothness were very difficult monocularly compared to binocularly, as reflected in substantial differences in sensitivity (d'). Binocular vision is useful for judgements of the layout and regularity of terrain to at least 9.0 metres (an important range for moment-to-moment path planning during walking, running and assisted travel). In sum, binocular vision can contribute to precise judgements of ground surface properties. This contribution is not simply limited to calibration and anchoring of monocular cues in personal space.}, author = {Allison, Robert S. and Gillam, Barbara J. and Palmisano, Stephen A.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:50:42 -0400}, doi = {10.1167/8.6.536}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {6}, pages = {536-536}, title = {Binocular slant discrimination beyond interaction space}, url-1 = {https://doi.org/10.1167/8.6.536}, volume = {8}, year = {2008}}
@inproceedings{allison2008127-130, abstract = {Effective management and treatment of glaucoma and other visual diseases depend on early diagnosis. However, early symptoms of glaucoma often go unnoticed until a significant portion of the visual field is lost. The ability to simulate the visual consequences of the disease offers potential benefits for patients and clinical education as well as for public awareness of its signs and symptoms. Experiments using simulated visual field defects could identify changes in behaviour, for example during driving, that one uses to compensate at the early stages of the disease's development. Furthermore, by understanding how visual field defects affect performance of visual tasks, we can help develop new strategies to cope with other devastating diseases such as macular degeneration. A Gaze-Contingent Display (GCD) system was developed to simulate an arbitrary visual field in a virtual environment. The system can estimate real-time gaze direction and eye position in earth-fixed coordinates during relatively large head movement, and thus it can be used in immersive projection-based VE systems like the CAVE™. Arbitrary visual fields are simulated via OpenGL and Shading Language capabilities and techniques that are supported by the GPU, thus enabling fast performance in real time. In order to simulate realistic visual defects, the system performs multiple image processing operations including change in acuity, brightness, color, glare and image distortion. The final component of the system simulates different virtual scenes that the participant can navigate through and explore. As a result, this system creates an experimental environment to study the effects of low vision on everyday tasks such as driving and navigation.}, address = {New York}, author = {Vinnikov, M. and Allison, R.S. and Swierad, D.}, booktitle = {Proceedings of the Eye Tracking Research and Applications Symposium}, date-modified = {2012-07-02 22:36:37 -0400}, doi = {10.1145/1344471.1344504}, editor = {Spencer, S. N.}, keywords = {Eye Movements & Tracking}, pages = {127-130}, publisher = {Association for Computing Machinery}, title = {Real-Time Simulation of Visual Defects with Gaze-Contingent Display}, url-1 = {https://doi.org/10.1145/1344471.1344504}, year = {2008}}
@inproceedings{allison2008198-204, abstract = {Convergence insufficiency is characterized by an inability to maintain effortless alignment of the two eyes (binocular convergence) while performing near tasks. Conventional rehabilitative vision therapy for the condition is monotonous and dull, leading to low levels of compliance. If the therapy is not performed then improvements in the condition are unlikely. This paper examines the use of computer games as a new delivery paradigm for vision therapy, specifically how they can be used in the treatment of convergence insufficiency while at home. A game was created and tested in a small scale clinical trial. Results show clinical improvements, as well as high levels of compliance and motivation. Additionally, the game was able to objectively track patient progress and compliance.}, address = {Vancouver, Canada}, author = {Carvelho, T. and Allison, R.S. and Irving, E. L. and Herriot, C.}, booktitle = {2008 Virtual Rehabilitation}, date-modified = {2012-07-02 22:39:29 -0400}, doi = {10.1109/ICVR.2008.4625160}, keywords = {Gaming for Vision Therapy}, pages = {198-204}, publisher = {IEEE, New York}, title = {Computer Gaming for Vision Therapy}, url-1 = {https://doi.org/10.1109/ICVR.2008.4625160}, year = {2008}}
@article{allison2007317-327, abstract = {A basic task in the construction and use of a stereoscopic camera and display system is the alignment of the left and right images appropriately, a task generally referred to as camera convergence. Convergence of the real or virtual stereoscopic cameras can shift the range of portrayed depth to improve visual comfort, can adjust the disparity of targets to bring them nearer to the screen and reduce accommodation-vergence conflict, or can bring objects of interest into the binocular field of view. Although camera convergence is acknowledged as a useful function, there has been considerable debate over the transformation required. It is well known that rotational camera convergence or {`}toe-in{'} distorts the images in the two cameras producing patterns of horizontal and vertical disparities that can cause problems with fusion of the stereoscopic imagery. Behaviorally, similar retinal vertical disparity patterns are known to correlate with viewing distance and strongly affect perception of stereoscopic shape and depth. There has been little analysis of the implications of recent findings on vertical disparity processing for the design of stereoscopic camera and display systems. I ask how such distortions caused by camera convergence affect the ability to fuse and perceive stereoscopic images.}, author = {Allison, R.S.}, date-modified = {2012-07-02 19:04:54 -0400}, doi = {10.2352/J.ImagingSci.Technol.(2007)51:4(317)}, journal = {Journal of Imaging Science and Technology}, keywords = {Stereopsis}, number = {4}, pages = {317-327}, title = {Analysis of the influence of vertical disparities arising in toed-in stereoscopic cameras}, url-1 = {https://doi.org/10.2352/J.ImagingSci.Technol.(2007)51:4(317)}, volume = {51}, year = {2007}}
@incollection{Vinnikov:2007fd, address = {Toronto, Canada}, author = {Vinnikov, M. and Allison, R.S.}, booktitle = {OCE Discovery 2007}, date-added = {2011-05-09 11:19:31 -0400}, date-modified = {2011-05-11 11:59:49 -0400}, keywords = {Night Vision}, month = {05}, title = {Simulation of NVG-aided flight over terrain}, year = {2007}}
@incollection{Guterman:2007fb, address = {Toronto, Canada}, author = {Guterman, P. and Allison, R.S.}, booktitle = {OCE Discovery 2007}, date-added = {2011-05-09 11:17:59 -0400}, date-modified = {2011-05-11 11:59:49 -0400}, keywords = {Night Vision}, month = {05}, title = {Assessing Image Intensifier Integration in Emergency and Security Response}, url-1 = {http://ocediscovery.com/video/PearlGuterman/Pearl_Guterman-OCE_Video_Contest.ppt}, year = {2007}}
@incollection{Guterman-P.:2007kq, address = {Toronto, Canada}, author = {Guterman, P.S. and Allison, R.S. and Rushton, S.K.}, booktitle = {CVR Conference 2007: Cortical Mechanisms of Vision}, date-added = {2011-05-09 11:16:14 -0400}, date-modified = {2012-09-24 14:46:23 +0000}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {06}, organization = {Centre for Vision Research, York University}, title = {The Visual Control of Locomotion as a Function of the Richness of the Visual Environment}, year = {2007}}
@incollection{Vinnikov:2007qb, abstract = {Effective management and treatment of glaucoma and other visual diseases depend on early diagnosis. However, early symptoms of many severe visual diseases often go unnoticed until a significant portion of the visual field is lost. The ability to simulate the visual consequences of the disease offers potential benefits for patients and clinical education as well as for public awareness of its signs and symptoms. Experiments using simulated visual field defects could identify changes in behaviour, for example during driving, that one uses to compensate at the early stages of the disease's development. Furthermore, by understanding how visual field defects affect performance in visual tasks, we can help develop new strategies to cope with other devastating diseases such as macular degeneration. A Gaze-Contingent Display (GCD) system was developed to simulate an arbitrary visual field in a virtual environment. The system can estimate real-time gaze direction and eye position in earth-fixed coordinates during relatively large head movement, and thus it can be used in immersive projection-based VE systems like the CAVE™. Arbitrary visual fields are simulated via OpenGL and Shading Language capabilities and techniques that are supported by the GPU, thus enabling fast performance in real time. In order to simulate realistic visual defects, the system performs multiple image processing operations including change in acuity, brightness, color, glare and image distortion. The final component of the system simulates different virtual scenes that the participant can navigate through and explore. As a result, this system creates an experimental environment that could be useful for studying the effects of low vision on everyday tasks such as driving and navigation.}, address = {Waterloo, Canada}, author = {Vinnikov, M. and Allison, R.S. and Huang, H.}, booktitle = {UW-IEEE joint Symposium on Biomedical Imaging and Computer Vision (BICV 2007)}, date-added = {2011-05-09 11:14:43 -0400}, date-modified = {2011-05-18 16:11:47 -0400}, keywords = {Eye Movements & Tracking}, month = {09}, title = {Real time simulation of impaired vision in naturalistic settings with gaze-contingent display}, year = {2007}}
@incollection{Allison:2007vp, address = {Ottawa, Canada}, author = {Allison, R.S.}, booktitle = {CRSS/ASPRS 2007 Fall Conference, Our Common borders - Safety, Security and the Environment Through Remote Sensing}, date-added = {2011-05-09 11:12:18 -0400}, date-modified = {2011-05-11 11:59:49 -0400}, keywords = {Night Vision}, month = {October 28th- November 1st}, title = {The Importance of Flight Test and Evaluation in the Development of Airborne Technologies for Border Enforcement: Assessing Night Vision Goggle Performance in Security Applications}, year = {2007}}
@incollection{Sakano:2007hc, abstract = {We investigated whether motion aftereffect (MAE) in depth can be induced by adaptation to apparent motion-in-depth based on binocular cues: changing disparity and interocular velocity differences. The adaptation stimulus was a random-dot stereogram (RDS) in which the absolute disparity alternated every frame between two values while the dot distribution changed randomly every second frame. Thus, the stimulus contained a coherent interocular velocity difference signal to adapt motion-in-depth and a balanced changing disparity signal that should not induce a MAE in depth. The test stimulus was a stationary random-dot stereogram depicting a fronto-parallel plane. MAE in depth occurred in the opposite direction to the coherent interocular velocity difference based adapting stimulus, supporting the idea that adaptation to interocular velocity differences produces MAE in depth.}, address = {Tokyo, Japan}, author = {Sakano, Y. and Allison, R.S.}, booktitle = {The Journal of the Vision Society of Japan (Winter Annual Meeting 2007 of The Vision Society of Japan (VSJ))}, date-added = {2011-05-06 16:39:56 -0400}, date-modified = {2013-12-28 14:59:48 +0000}, keywords = {Motion in depth}, month = {Jan 31-Feb 2}, organization = {Vision Society of Japan}, title = {Adaptation to apparent motion-in-depth based on binocular cues}, volume = {58}, year = {2007}}
@incollection{Allison:2007rp, address = {Toronto, Canada}, author = {Allison, R.S.}, booktitle = {CASI 54th Aeronautics Conference}, date-added = {2011-05-06 16:34:07 -0400}, date-modified = {2011-05-11 11:59:49 -0400}, keywords = {Night Vision}, month = {04}, pages = {64-65}, title = {Assessing Night Vision Goggle Performance in Security Applications}, url-1 = {https://percept.eecs.yorku.ca/papers/casi paper.pdf}, year = {2007}}
@incollection{Herriot:2007gd, abstract = {Purpose: The prevalence of convergence insufficiency (CI) has been estimated between 2.2 and 13\%. CI is most often treated with some form of eye exercises. However, traditional forms of the exercises are tedious and boring, leading to poor patient compliance. The purpose of this pilot study is to investigate the efficacy and user acceptance of computerized gaming as a form of treatment for convergence insufficiency. Methods: Four participants were selected for the study, with ages ranging from 11 to 34 years. Participants had to have corrected visual acuity of 6/12, stereo-acuity threshold $\leq$ 40 seconds of arc, near point of convergence (NPC) $\geq$ 6 cm, exophoria greater at near than at far, and a positive fusional vergence (PFV) $\leq 15\Delta$ or not meeting Sheard's criterion. Participants were asked to play a revised version of Pac-Man using a stereoscope to fuse two separate images. As the participant's convergence improved, the convergence demand was increased and operant conditioning paradigms were used to keep the participant motivated. Training was performed for 20 minutes each day, 5 days a week for 2 weeks. Participants were asked to complete a visual symptom questionnaire prior to their training and an acceptance questionnaire after completion. Results: Prior to training the average NPC and PFV values were $12.9\pm 7.6$ cm and $13\pm 4.2\Delta$ respectively, and two participants reported visual symptoms of CI. Upon completion of the training, the average NPC decreased to $5.8\pm 3.0$ cm and the average PFV increased to $25\pm 4.1\Delta$. The two symptomatic participants reported relief of their symptoms, and all participants reported that computerized vision therapy was easy to understand, entertaining, and motivating. Conclusion: Computerized gaming is a promising method to improve patient motivation and compliance. Further testing is required to directly compare its effectiveness to traditional methods.}, address = {Waterloo, Canada}, author = {Herriot, C. and Carvelho, T. and Allison, R.S. and Irving, E.L.}, booktitle = {Sixth Canadian Optometry Conference on Vision Science}, date-added = {2011-05-06 16:30:31 -0400}, date-modified = {2012-07-02 22:39:14 -0400}, keywords = {Gaming for Vision Therapy}, month = {Dec 7th-9th}, title = {Computerized Gaming Technology as an Effective Form of Vision Therapy for Convergence Insufficiency}, url-1 = {https://percept.eecs.yorku.ca/papers/COCVS abstract Herriot.doc}, year = {2007}}
@incollection{Thomas:2007tn, address = {Toronto, Canada}, author = {Thomas, P.J. and Jennings, S. and Macuda, T. and Allison, R.S. and Hornsey, R.}, booktitle = {Advanced Deployable Day/Night Simulation (ADDNS) Symposium}, date-added = {2011-05-06 16:19:43 -0400}, date-modified = {2011-05-18 15:56:41 -0400}, keywords = {Night Vision}, month = {November 13th-14th}, pages = {43-45}, title = {Experimental validation of an NVD parametric model}, url-1 = {https://percept.eecs.yorku.ca/papers/Allison- Experimental validation of an NVD Parametric model.pdf}, year = {2007}}
@incollection{Allison:2007hc, address = {Toronto, Canada}, author = {Allison, R.S. and Brandwood, T. and Vinnikov, M. and Zacher, J.E. and Jennings, S. and Macuda, T. and Thomas, P.J. and Palmisano, S.A.}, booktitle = {Advanced Deployable Day/Night Simulation (ADDNS) Symposium}, date-added = {2011-05-06 16:17:47 -0400}, date-modified = {2011-05-18 16:11:30 -0400}, keywords = {Night Vision}, month = {November 13th-14th}, pages = {27-29}, title = {Psychophysics of night vision device halo}, url-1 = {https://percept.eecs.yorku.ca/papers/Allison- Psychophysics of Night Vision Device Halo.pdf}, year = {2007}}
@incollection{allison200730-30, author = {Sadr, S. and Allison, R.S. and Vinnikov, M.}, booktitle = {Perception}, date-modified = {2011-09-12 21:59:21 -0400}, journal = {Perception}, keywords = {Eye Movements & Tracking}, pages = {30-30}, title = {Effect of scene transitions on trans-saccadic change detection in natural scenes}, url-1 = {https://percept.eecs.yorku.ca/papers/Effects of scene transition.pdf}, url-2 = {http://www.perceptionweb.com/abstract.cgi?id=v070786}, volume = {36}, year = {2007}}
@incollection{allison20071017-1017, abstract = {What visual information guides locomotion? Optic flow, the global pattern of motion at the vantage point of the eye, specifies the direction of self-motion, and could be used to control walking. Alternatively, we could walk in the perceived direction of a target. Recent evidence suggests that the type of visual environment can influence steering behaviour. However, controversy remains as to whether this demonstrates direct, online use of flow or indirect influence on context and recalibration of direction. The current literature is complicated by methodological as well as theoretical differences between prism-based and head-mounted display based studies. Both techniques have well-known limitations that have complicated comparisons across studies. Here we tested undergraduate students (n = 6) using an immersive virtual environment, where the heading specified by flow was displaced by $0^{\circ}$, $\pm 5^{\circ}$ and $\pm 10^{\circ}$ from the direction of the target through the virtual environment (akin to prism displacement). Observers walked (stepped in-place) to a target in five virtual environments, each of which consisted of a plain gray or textured ground, a blue sky, and zero, one, ten, or twenty objects. The distance to the target from the start position was 20 m, nearly double that of comparable studies. For all displacement conditions, observers walked in the perceived direction of the target, and there was no significant main effect of the environment. The findings suggest that egocentric direction is used to guide locomotion on foot, regardless of the number of objects enhancing flow in the environment.}, author = {Guterman, Pearl S. and Allison, Robert S. and Rushton, Simon K.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:52:19 -0400}, doi = {10.1167/7.9.1017}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {9}, pages = {1017-1017}, title = {The visual control of walking: do we go with the (optic) flow?}, url-1 = {https://doi.org/10.1167/7.9.1017}, volume = {7}, year = {2007}}
@incollection{allison2007203-203, abstract = {Depth perception from stereopsis is thought to be resilient to vertical misalignments of up to $4^{\circ}$ (Ogle, 1955 Archives of Ophthalmology 53 495 ff; Mitchell, 1970 Vision Research 10 145 - 162). We have replicated these results, and assessed the assumption that horizontal disparity is responsible for depth in such stimuli. A horizontal line, which extended the width of the display, was inserted between the vertically misaligned horizontally disparate targets. Surprisingly, this had no effect on depth-discrimination performance. We repeated the study with only one half-image (a monoptic target) and the central line. Depth discrimination was above chance for all observers, suggesting that previous results were not due to horizontal disparity, but to the retinal position of the stimuli (Kaye, 1978 Vision Research 18 1013 - 1022; Wilcox et al, 2007 Vision Research 47 in press). Tolerance to vertical misalignment has been used as evidence against an epipolar constraint in human stereopsis; the presence of monoptic depth cues in such stimuli suggests that the issue is unresolved. [Supported by Natural Sciences and Engineering Research Council of Canada and CIHR Training Grant in Vision Health Research.]}, author = {Fukuda, K. and Wilcox, L. M. and Allison, R.S. and Howard, I. P.}, booktitle = {Perception}, date-modified = {2011-09-12 22:04:42 -0400}, journal = {Perception}, keywords = {Depth perception}, pages = {203-203}, title = {Horizontal-disparity processing in the presence of vertical misalignment: The role of monoptic depth}, url-1 = {https://percept.eecs.yorku.ca/papers/fukuda ECVP 2007.doc}, url-2 = {http://www.perceptionweb.com/abstract.cgi?id=v070330}, volume = {36}, year = {2007}}
@incollection{allison2007817-817, abstract = {The benefits of binocular vision have been debated throughout the history of vision science yet few studies have considered its contribution beyond a viewing distance of a few metres. What benefit, if any, does binocular vision confer for distance vision? Elementary texts commonly assert that stereopsis is ineffective beyond modest distances despite theoretical analysis suggesting a much larger effective range. We compared monocular and binocular performance on depth interval estimation and discrimination tasks at and beyond 4.5m. Stimuli consisted of a combination of: 1) the reference stimulus, a smoothly finished wooden architectural panel, mounted upright, and facing the subject, 2) the test stimulus, a thin rod that could be precisely moved in depth and 3) a homogeneous background. An aperture prevented view of the top and bottom of the stimuli. Subjects made verbal, signed estimates in cm of the depth between the test and reference stimuli. On each trial, the depth was set between $\pm 100$cm. Observers viewed the displays either monocularly or binocularly from 4.5, 9.0 or 18.0m. Depth discrimination at 9.0 m was also evaluated using adaptive staircase procedures. Regression analysis provided measures of the scaling between perceived depth and actual depth (the `gain') and the precision. Under monocular conditions, perceived depth was significantly compressed. Binocular depth estimates were much nearer to veridical although also compressed. Both raw precision measures and those normalized by the gain were much smaller for binocular compared to monocular conditions (ratios between 2.1 and 48). We confirm that stereopsis supports reliable depth discriminations beyond typical laboratory distances. Furthermore, binocular vision can significantly improve both the accuracy and precision of depth estimation to at least 18m. We will discuss additional experiments to extend these results to larger viewing distances and to evaluate the contribution of stereopsis under rich cue conditions.}, author = {Allison, Robert and Gillam, Barbara and Vecellio, Elia}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:50:04 -0400}, doi = {10.1167/7.9.817}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {9}, pages = {817-817}, title = {Binocular depth discrimination and estimation beyond interaction space}, volume = {7}, year = {2007}, url-1 = {https://doi.org/10.1167/7.9.817}}
@inproceedings{Craig:2007lg, author = {Craig, G. and Erdos, R. and Carignan, S. and Jennings, S. and Swail, C. and Ellis, K. and Gubbels, A. W. and Macuda, T. and Schnell, T. and Poolman, P. and Allison, R.S. and Cheung, R.}, booktitle = {78th Annual Scientific Meeting of the Aerospace Medical Association}, date-added = {2011-05-09 13:56:59 -0400}, date-modified = {2011-05-18 16:23:28 -0400}, keywords = {Neural Avionics}, month = {05}, organization = {Aerospace Medical Association}, title = {Toward the {``}Cognitive Cockpit{''}: Flight Test Platforms and Methods for Monitoring Pilot Mental State}, year = {2007}}
@inproceedings{Allison:2007zl, abstract = {Police and border security operations are an important and growing application of night vision devices (NVDs). NVDs improve visibility at night but suffer from a variety of perceptual artifacts and human factors issues. In a series of helicopter-based flight trials we analyzed subject performance on model tasks based on typical security applications. Subjects performed the tasks under conditions of unaided daytime vision, unaided nighttime vision or image intensified nighttime vision. The tasks included directed search over open and forested terrain, detection and identification of a temporary landing zone and search/tracking of a moving vehicle marked with a covert IR marker. The results of this study confirm that NVDs can provide significant operational value but also illustrate the limitations of the technology.}, address = {Toronto, Canada}, author = {Allison, R.S. and Guterman, P. and Jennings, S. and Macuda, T. and Sakano, Y. and Thomas, P. and Zacher, J.}, booktitle = {Aerospace in Canada: Research and Innovation for the Global Marketplace, 2007 AERO Conference (Flight Test Methods Symposium)}, date-added = {2011-05-06 13:11:02 -0400}, date-modified = {2011-05-18 15:43:56 -0400}, keywords = {Night Vision}, month = {April 24th-26th}, title = {Assessing Night Vision Goggle Performance in Security Applications}, url-1 = {https://percept.eecs.yorku.ca/papers/casi paper.pdf}, year = {2007}}
@inproceedings{allison2007U5570-U5570, abstract = {Night vision devices (NVDs) or night-vision goggles (NVGs) based on image intensifiers improve nighttime visibility and extend night operations for military and increasingly civil aviation. However, NVG imagery is not equivalent to daytime vision and impaired depth and motion perception has been noted. One potential cause of impaired perceptions of space and environmental layout is NVG halo, where bright light sources appear to be surrounded by a disc-like halo. In this study we measured the characteristics of NVG halo psychophysically and objectively and then evaluated the influence of halo on perceived environmental layout in a simulation experiment. Halos are generated in the device and are not directly related to the spatial layout of the scene. We found that, when visible, halo image (i.e. angular) size was only weakly dependent on both source intensity and distance although halo intensity did vary with effective source intensity. The size of halo images surrounding lights sources are independent of the source distance and thus do not obey the normal laws of perspective. In simulation experiments we investigated the effect of NVG halo on judgements of observer attitude with respect to the ground during simulated flight. We discuss the results in terms of NVG design and of the ability of human operators to compensate for perceptual distortions.}, address = {Bellingham}, author = {Zacher, J. E. and Brandwood, T. and Thomas, P. and Vinnikov, M. and Xu, G. and Jennings, S. and Macuda, T. and Palmisano, S. A. and Craig, G. and Wilcox, L. and Allison, R.S.}, booktitle = {Head- and Helmet-Mounted Displays XII: Design and Applications}, date-modified = {2012-07-02 22:23:11 -0400}, doi = {10.1117/12.719892}, editor = {Brown, R. W. and Reese, C. E. and Marasco, P. L. and Harding, T. H.}, keywords = {Night Vision}, pages = {U5570-U5570}, publisher = {SPIE-Int Soc Optical Engineering}, series = {Proceedings of the Society of Photo-Optical Instrumentation Engineers (SPIE)}, title = {Effects of image intensifier halo on perceived layout}, volume = {6557}, year = {2007}, url-1 = {https://doi.org/10.1117/12.719892}}
@inproceedings{allison2007123-130, abstract = {Application designers of collaborative distributed Virtual Environments must account for the influence of the network connection and its detrimental effects on user performance. Based upon analysis and classification of existing latency compensation techniques, this paper introduces a novel approach to latency amelioration in the form of a two-tier predictor-estimator framework. The technique is variability-aware due to its proactive sender-side prediction of a pose a variable time into the future. The prediction interval required is estimated based on current and past network delay characteristics. This latency estimate is subsequently used by a Kalman Filter-based predictor to replace the measurement event with a predicted pose that matches the event's arrival time at the receiving workstation. The compensation technique was evaluated in a simulation through an offline playback of real head motion data and network delay traces collected under a variety of real network conditions. The experimental results indicate that the variability-aware approach significantly outperforms a state-of-the-art one, which assumes a constant system delay.}, address = {New York}, author = {Tumanov, A. and Allison, R.S. and Stuerzlinger, W.}, booktitle = {Proceedings of IEEE Virtual Reality 2007}, date-modified = {2011-05-11 13:23:56 -0400}, doi = {10.1109/VR.2007.352472}, editor = {Sherman, W. and Lin, M. and Steed, A.}, keywords = {Augmented & Virtual Reality}, pages = {123-130}, publisher = {IEEE}, title = {Variability-aware latency amelioration in distributed environments}, year = {2007}, url-1 = {https://doi.org/10.1109/VR.2007.352472}}
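The Tumanov, Allison and Stuerzlinger entry above describes a two-tier latency-compensation scheme: a network-delay estimator sets a variable prediction horizon, and a Kalman-filter predictor extrapolates the pose that far ahead. The Python below is a minimal sketch of that general idea only, not the authors' implementation; the class, the constant-velocity motion model, and all noise parameters are illustrative assumptions.

import numpy as np

# Sketch (illustrative, not the paper's code): constant-velocity Kalman
# filter over one pose coordinate, with variable-horizon extrapolation.
class PosePredictor:
    def __init__(self, q=1e-3, r=1e-2):
        self.x = np.zeros(2)   # state: [position, velocity]
        self.P = np.eye(2)     # state covariance
        self.q = q             # process-noise scale (assumed value)
        self.r = r             # measurement-noise variance (assumed value)

    def update(self, z, dt):
        # Time update: propagate the state to the new sample's timestamp.
        F = np.array([[1.0, dt], [0.0, 1.0]])
        self.x = F @ self.x
        self.P = F @ self.P @ F.T + self.q * np.eye(2)
        # Measurement update with the observed pose coordinate z.
        H = np.array([1.0, 0.0])
        S = H @ self.P @ H + self.r
        K = (self.P @ H) / S
        self.x = self.x + K * (z - H @ self.x)
        self.P = self.P - np.outer(K, H @ self.P)

    def predict_ahead(self, horizon):
        # Extrapolate the filtered state 'horizon' seconds into the future.
        F = np.array([[1.0, horizon], [0.0, 1.0]])
        return (F @ self.x)[0]

def ewma_delay(prev_estimate, delay_sample, alpha=0.1):
    # Smoothed network-delay estimate that sets the prediction horizon.
    return (1.0 - alpha) * prev_estimate + alpha * delay_sample

In use, a sender would call update() for each tracker sample, refresh the delay estimate from recent delay measurements, and transmit predict_ahead(delay_estimate); the point of the two-tier structure is that the horizon varies with measured network conditions rather than assuming a constant system delay.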
@inproceedings{allison20071-8, abstract = {Expected temporal effects in a night vision goggle (NVG) include the fluorescence time constant, charge depletion at high signal levels, the response time of the automatic gain control (AGC) and other internal modulations in the NVG. There is also the possibility of physical damage or other non-reversible effects in response to large transient signals. To study the temporal behaviour of an NVG, a parametric Matlab model has been created. Of particular interest in the present work was the variation of NVG gain, induced by its automatic gain control (AGC), after a short, intense pulse of light. To verify the model, the reduction of gain after a strong pulse was investigated experimentally using a simple technique. Preliminary laboratory measurements were performed using this technique. The experimental methodology is described, along with preliminary validation data.}, author = {Thomas, P. J. and Allison, R.S. and Jennings, S. and Macuda, T. and Zacher, J. and Mehbratu, H. and Hornsey, R.}, booktitle = {Proceedings of SPIE - The International Society for Optical Engineering}, date-modified = {2012-07-02 22:23:38 -0400}, doi = {10.1117/12.719685}, editor = {Randall W. Brown and Colin E. Reese and Peter L. Marasco and Thomas H. Harding}, keywords = {Night Vision}, organization = {International Society for Optics and Photonics}, pages = {1-8}, title = {Comparison of an NVG model with experiments to elucidate temporal behaviour}, volume = {6557}, year = {2007}, url-1 = {https://doi.org/10.1117/12.719685}}
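The Thomas et al. entry above models NVG temporal behaviour, in particular the drop and recovery of gain driven by the automatic gain control after a short, intense pulse of light. The Matlab model itself is not reproduced here; the Python fragment below is only a generic first-order AGC illustration under assumed dynamics and parameter values: gain relaxes toward whatever value would map the current input to a target output level, so it droops during a bright pulse and recovers afterwards on a time constant tau.

import numpy as np

# Illustrative first-order AGC (assumed dynamics, not the NRC model).
def simulate_agc(luminance, dt=1e-3, tau=0.2, target_output=1.0):
    gain = 1.0
    out = []
    for s in luminance:
        out.append(gain * s)                    # output at the current gain
        desired = target_output / max(s, 1e-6)  # gain that would hit target
        gain += (desired - gain) * (dt / tau)   # first-order relaxation
    return np.array(out)

# Example: dim scene, a 50 ms bright pulse, then dim again; the output
# shows a depressed gain (dimmer response) for a period after the pulse.
pulse = np.concatenate([np.full(200, 0.1), np.full(50, 100.0), np.full(750, 0.1)])
response = simulate_agc(pulse)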
@techreport{allison2007, author = {Macuda, T. and Craig, G. and Swail, C. and Gubbels, A. W. and Ellis, K. and Jennings, S. and Carignan, S. and Erdos, R. and Allison, R.S. and Byrne, A. and Schnell, T. and Edgar, T.}, date-modified = {2012-07-02 19:54:51 -0400}, institution = {National Research Council Canada, Institute for Aerospace Research}, keywords = {Misc.}, note = {NRC Institute for Aerospace Research}, number = {FRL-2007-0007}, title = {Successful Flight Trials Gatekeeper Viperfish Digital Video Recorder: Technology Transfer through National Laboratory Infrastructure}, year = {2007}}
@article{Wilcox:2006am, abstract = {Improving the sense of ``presence'' is a common goal of three-dimensional (3D) display technology for film, television, and virtual reality. However, there are instances in which 3D presentations may elicit unanticipated negative responses. For example, it is well established that violations of interpersonal space cause discomfort in real-world situations. Here we ask if people respond similarly when viewing life-sized stereoscopic images. Observers rated their level of comfort in response to animate and inanimate objects in live and virtual (stereoscopic projection) viewing conditions. Electrodermal activity was also recorded to monitor their physiological response to these stimuli. Observers exhibited significant negative reactions to violations of interpersonal space in stereoscopic 3D displays, which were equivalent to those experienced in the natural environment. These data have important implications for the creation of 3D media and the use of virtual reality systems.}, author = {Wilcox, L. and Allison, R.S. and Elfassy, S. and Grelik, C.}, date-added = {2011-05-06 12:07:33 -0400}, date-modified = {2012-07-02 19:07:07 -0400}, doi = {10.1145/1190036.1190041}, journal = {{ACM} Transactions on Applied Perception (TAP)}, keywords = {Augmented & Virtual Reality}, number = {4}, pages = {412-418}, title = {Personal space in virtual reality}, volume = {3}, year = {2006}, url-1 = {https://doi.org/10.1145/1190036.1190041}}
@article{allison2006108-21, abstract = {Existing commercial technologies do not adequately meet the requirements for tracking in fully enclosed virtual reality displays. We present a novel six degree of freedom tracking system, the hedgehog, which overcomes several limitations inherent in existing sensors and tracking technology. The system reliably estimates the pose of the user's head with high resolution and low spatial distortion. Light emitted from an arrangement of lasers projects onto the display walls. An arrangement of cameras images the walls and the two-dimensional centroids of the projections are tracked to estimate the pose of the device. The system is able to handle ambiguous laser projection configurations, static and dynamic occlusions of the lasers, and incorporates an auto-calibration mechanism due to the use of the SCAAT (single constraint at a time) algorithm. A prototype system was evaluated relative to a state-of-the-art motion tracker and showed comparable positional accuracy (1-2 mm RMS) and significantly better absolute angular accuracy (0.1° RMS).}, author = {Vorozcovs, A. and Stuerzlinger, W. and Hogue, A. and Allison, R.S.}, date-modified = {2012-07-02 19:05:47 -0400}, doi = {10.1162/pres.2006.15.1.108}, journal = {Presence}, keywords = {Augmented & Virtual Reality}, number = {1}, pages = {108-21}, title = {The hedgehog: a novel optical tracking method for spatially immersive displays}, volume = {15}, year = {2006}, url-1 = {https://doi.org/10.1162/pres.2006.15.1.108}}
@article{allison200657-71, abstract = {Three experiments examined the effects of image decorrelation on the stereoscopic detection of sinusoidal depth gratings in static and dynamic random-dot stereograms (RDS). Detection was found to tolerate greater levels of image decorrelation as: (i) density increased from 23 to 676 dots/deg$^{2}$; (ii) spatial frequency decreased from 0.88 to 0.22 cpd; (iii) amplitude increased above 0.5 arcmin; and (iv) dot lifetime decreased from 1.6 s (static RDS) to 80 ms (dynamic RDS). In each case, the specific pattern of tolerance to decorrelation could be explained by its consequences for image sampling, filtering, and the influence of depth noise. [All rights reserved Elsevier]}, author = {Palmisano, S. and Allison, R.S. and Howard, I. P.}, date-modified = {2011-05-10 14:48:02 -0400}, doi = {10.1016/j.visres.2005.10.005}, journal = {Vision Research}, keywords = {Stereopsis}, number = {1-2}, pages = {57-71}, title = {Effect of decorrelation on 3-D grating detection with static and dynamic random-dot stereograms}, volume = {46}, year = {2006}, url-1 = {https://doi.org/10.1016/j.visres.2005.10.005}}
@article{allison20064048-58, abstract = {We report a novel illusory distortion of the visual scene, which became apparent during both: (i) observer rotation inside a furnished stationary room; and (ii) room rotation about the stationary observer. While this distortion had several manifestations, the most common experience was that scenery near fixation appeared to sometimes lead and other times lag more peripheral scenery. Across a series of experiments, we eliminated explanations based on eye-movements, distance misperception, peripheral aliasing, differential motion sensitivity and adaptation. We found that these illusory scene distortions occurred only when the observer perceived (real or illusory) changes in self-tilt and maintained a stable fixation. [All rights reserved Elsevier]}, author = {Palmisano, S. and Allison, R.S. and Howard, I. P.}, date-modified = {2011-05-11 13:18:05 -0400}, doi = {10.1016/j.visres.2006.07.020}, journal = {Vision Research}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {23}, pages = {4048-58}, title = {Illusory scene distortion occurs during perceived self-rotation in roll}, volume = {46}, year = {2006}, url-1 = {https://doi.org/10.1016/j.visres.2006.07.020}}
@incollection{Macuda:2006wy, address = {Old Windsor, UK}, author = {Macuda, T. and Craig, G. and Erdos, R. and Carignan, S. and Jennings, S. and Allison, R.S. and Brulotte, M.}, booktitle = {Night Vision Technology Forum}, date-added = {2011-05-09 14:08:32 -0400}, date-modified = {2011-09-12 22:23:04 -0400}, keywords = {Night Vision}, month = {03}, title = {Assessing the Impact of NVGs on Vision and Flight Performance using Field, Flight, Laboratory and Aeromedical Approaches}, year = {2006}}
@incollection{Macuda:2006kv, address = {Adelaide, Australia}, author = {Macuda, T. and Craig, G. and Jennings, S. and Carignan, S. and Erdos, R. and Brulotte, M. and Allison, R.}, booktitle = {South Australian Night Vision Conference}, date-added = {2011-05-09 14:07:20 -0400}, date-modified = {2011-09-12 22:22:43 -0400}, keywords = {Night Vision}, month = {November 20th-23rd}, title = {Evaluation of Night Vision Technologies using Laboratory and Flight Tests}, year = {2006}}
@incollection{Macuda:2006sh, abstract = {The current presentation is a broad overview of the Night Vision Assessment facilities at the Flight Research Laboratory of the Institute for Aerospace Research of the National Research Council of Canada. This unique cadre of researchers consists of a broad Canadian core group of research scientists, pilots, operational military and law enforcement personnel and physicians that includes several government and private organizations. The initial portion of the presentation shall address the overall assessment of three tube technologies using flight and laboratory methods. This is a summary of recent laboratory and flight trials with the Canadian Forces. The results shall be summarized in terms of laboratory derived estimates of visual acuity, and in-flight test trials by NRC pilots using these night vision technologies. We will discuss this methodological approach as a model and example for assessment of night vision technologies for use in real operational flight environments. Further, these results will consider the importance of providing nominal behavioural performance values for flight technologies. The second portion of our presentation will consider the broader capabilities and activities of this core night vision research group at the Flight Research Laboratory. We will discuss current and planned efforts aimed at developing civil certification policies for the use of NVGs, suppression of forest fires using NVGs, the use of NVGs for Canadian law enforcement air wings, and the use of medical procedures to assess the impact of NVGs and related technologies in flight environments. The final phase of the talk will summarize our capability as a unique flight research facility capable of supporting a broad range of military and paramilitary needs. FRL can be considered as a distinctive Canadian research facility capable of supporting a broad range of operational flight needs in defence, law enforcement and related civilian environments.}, address = {Comox, British Columbia}, author = {Macuda, T. and Craig, G. and Brulotte, M. and Allison, R.S. and Crowell, B. and Filiter, D. and Lester, G. and Healey, S. and Erdos, R. and Carignan, S. and Tang, D. and Truong, L. and Langille, A. and Hamilton, K. and Souvestre, P. and Jennings, S.}, booktitle = {Comox Night Vision Conference}, date-added = {2011-05-06 16:44:18 -0400}, date-modified = {2011-05-18 15:40:23 -0400}, keywords = {Night Vision}, month = {11}, title = {A Multidisciplinary Approach Towards Assessing Night Vision Technologies at the Flight Research Laboratory: Integrating Laboratory, Field, Flight and Aeromedical approaches}, url-1 = {https://percept.eecs.yorku.ca/papers/Nightvisionconference comox.pdf}, year = {2006}}
@incollection{Sakano:2006gd, abstract = {1. Introduction Theoretically, there are at least two possible binocular cues to motion-in-depth, namely disparity change over time and interocular velocity differences. We previously reported that a motion aftereffect (MAE) in depth occurred after adaptation to motion-in-depth in random-element stereograms that contained interocular velocity differences. Moreover, the duration of MAE in depth following adaptation stimuli without spatially coherent disparities did not differ significantly from that following adaptation to stimuli with coherent disparities (VSS 2005). It is possible that equivalent duration of the MAEs in depth under these two conditions reflects saturation of MAE in depth caused by the long adaptation phase (2 min). In the present study, we test this directly via measurements of the duration of MAE in depth for a variety of adaptation durations. 2. Methods The adaptation stimulus consisted of random-dot stereograms that depicted two frontoparallel planes, one above and one below the fixation point. The two planes repeatedly moved in depth in opposite directions for 7.5 sec, 15 sec, 30 sec, 1 min, 2 min or 4 min. The dots of the adaptation stimulus were spatially and temporally correlated in the two eyes (RDS) or spatially uncorrelated but temporally correlated (URDS). Thus, both RDS and URDS contained the same amount of interocular velocity differences while only RDS had coherent disparity. The test stimulus was a stationary version of the spatially and temporally correlated adaptation stimulus. The subjects pressed a key when the illusory motion-in-depth of the test stimulus (MAE in depth) ceased. 3. Results and discussion Under both RDS and URDS adaptation conditions, a MAE in depth occurred. The duration of the MAE in depth increased as the adaptation duration increased. On the other hand, there was no difference in the duration of the MAE in depth between the RDS and URDS adaptation conditions. These results support the idea that there are mechanisms to see motion-in-depth based on interocular velocity differences, and adaptation to interocular velocity differences, not to changing disparity, is responsible for MAE in depth.}, author = {Sakano, Y. and Allison, R.S.}, booktitle = {The Journal of the Vision Society of Japan (The 4th Asian Conference on Vision, Matsue, Shimane, Japan)}, date-added = {2011-05-06 16:42:39 -0400}, date-modified = {2013-12-28 15:01:04 +0000}, keywords = {Eye Movements & Tracking}, title = {The Effects of Adaptation Duration and Interocular Spatial Correlation of the Adaptation Stimulus on the Duration of Motion Aftereffect in Depth}, year = {2006}}
@incollection{allison200625-26, author = {Tsirlin-Zaharescu, I. and Wilcox, L. M. and Allison, R.S.}, booktitle = {Perception}, date-modified = {2011-09-12 22:06:29 -0400}, journal = {Perception}, keywords = {Stereopsis}, pages = {25-26}, title = {Perceptual asymmetry in stereoscopic transparency}, url-1 = {http://www.perceptionweb.com/abstract.cgi?id=v060587}, volume = {35}, year = {2006}}
@incollection{allison2006830-830, abstract = {Transparency presents an extreme challenge to stereoscopic correspondence and surface interpolation, particularly in the case of multiple transparent surfaces in the same visual direction. In this experiment we manipulate density, separation in depth, and number of transparent planes within a single experimental design, to evaluate the constraints on stereoscopic transparency. We use a novel task involving identification of patterned planes among the planes constituting the stimulus. The results show that, under these conditions, (1) subjects are able to perceive up to five transparent surfaces concurrently; (2) the transparency percept is impaired by increasing the texture density; (3) the transparency percept is initially enhanced by increasing the disparity between the surfaces; (4) the percept begins to degrade as disparity between surfaces is further increased beyond an optimal disparity, which is a function of element density. Specifically, at higher texture densities the optimal disparity shifts to smaller values. This interaction between disparity and texture density is surprising, but it can account for discrepancies in the existing literature. We are currently testing extended correlational and feature-based models of stereopsis with our stimuli. This will provide insight into our psychophysical results and a basis for quantitative evaluation of existing computational models.}, author = {Tsirlin, Inna and Allison, Robert S. and Wilcox, Laurie M.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 19:02:25 -0400}, doi = {10.1167/6.6.830}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {6}, pages = {830-830}, title = {On seeing transparent surfaces in stereoscopic displays}, volume = {6}, year = {2006}, url-1 = {https://doi.org/10.1167/6.6.830}}
@incollection{allison2006626-626, abstract = {Previously, we found that a motion aftereffect (MAE) in depth can occur after adaptation to motion-in-depth in random-element stereograms (VSS 2005). In the present study, we investigated the depth selectivity of the MAE in depth. The adaptation stimulus consisted of two frontoparallel surfaces, one above and one below the fixation point. These surfaces were depicted by random-dot stereograms that were temporally correlated (RDS) or uncorrelated (DRDS). During the 2-min adaptation phase, the disparity of one surface increased and that of the other surface decreased linearly and repeatedly to simulate smooth motion-in-depth. The range of these disparity ramps was -26.2 to -8.72, -8.72 to +8.72, or +8.72 to +26.2 arcmin, where positive and negative values indicate crossed and uncrossed disparity. The test stimulus consisted of two stationary frontoparallel surfaces depicted by a RDS with a fixed pedestal disparity of either -17.4, 0, or +17.4 arcmin. Under RDS adaptation conditions, robust MAE in depth occurred. The duration of this MAE in depth did not depend on the relation between the disparity range of the adaptation stimulus and the pedestal disparity of the test stimulus. Under DRDS adaptation conditions, MAE in depth did not occur. These results suggest that the adaptable processes used to detect motion-in-depth from binocular cues are insensitive to pedestal disparity.}, author = {Sakano, Yuichi and Allison, Robert S. and Howard, Ian P. and Sadr, Sabnam}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 18:05:24 -0400}, doi = {10.1167/6.6.626}, journal = {Journal of Vision}, keywords = {Motion in depth}, number = {6}, pages = {626-626}, title = {Aftereffect of motion-in-depth based on binocular cues: No effect of relative disparity between adaptation and test surfaces}, volume = {6}, year = {2006}, url-1 = {https://doi.org/10.1167/6.6.626}}
@inproceedings{Macuda:2006mk, address = {San Francisco, CA}, author = {Macuda, T. and Poolman, P. and Schnell, T. and Keller, M. and Craig, G. and Swail, C. and Jennings, S. and Erdos, R. and Allison, R.S. and Lenert, A. and Carignan, S.}, booktitle = {Second International Conference on Augmented Cognition}, date-added = {2011-05-09 13:59:34 -0400}, date-modified = {2011-05-18 16:07:20 -0400}, keywords = {Neural Avionics}, title = {Neural Activity During a Simulated Rotary-Wing Flight Task Using Expert Test Pilots}, year = {2006}}
@inproceedings{Schnell:2006li, address = {San Francisco, CA}, author = {Schnell, T. and Poolman, P. and Macuda, T. and Craig, G. and Erdos, R. and Carignan, S. and Cheung, B. and Allison, R.S. and Lenert, A. and Jennings, S. and Swail, C. and Ellis, K. and Gubbels, A. W.}, booktitle = {Second International Conference on Augmented Cognition}, date-added = {2011-05-09 13:16:09 -0400}, date-modified = {2011-05-18 16:24:16 -0400}, keywords = {Neural Avionics}, month = {10}, title = {Toward the {``}Cognitive Cockpit{''}: Flight test platforms and methods for monitoring Pilot Mental State}, url-1 = {https://percept.eecs.yorku.ca/papers/0830best_Schnell_pres.pdf}, year = {2006}}
@techreport{Macuda:2006aw, author = {Macuda, T. and Craig, G. and Jennings, S. and Swail, C. and Carignan, S. and Erdos, R. and Nakagarawa, V. and Brulotte, M. and Healey, S. and Lester, G. and McAleavey, C. and Freeborn, E. and Allison, R.}, date-modified = {2016-01-03 03:29:39 +0000}, institution = {National Research Council Canada, NRC Institute for Aerospace Research}, keywords = {Misc.}, note = {NRC Institute for Aerospace Research FRL-2006-0054}, number = {FRL-2006-0054}, title = {Laser Eye Protection (LEP): Candidate Solutions and Understanding Their Impact on Visual and Flight Performance}, year = {2006}}
@techreport{Macuda:2006ol, author = {Macuda, T. and Craig, G. and Jennings, S. and Erdos, R. and Carignan, S. and Healey, S. and Miller, J. and Allison, R.}, date-modified = {2016-01-03 03:29:18 +0000}, institution = {National Research Council Canada, NRC Institute for Aerospace Research}, keywords = {Night Vision}, note = {NRC Institute for Aerospace Research FRL-2006-0008}, number = {FRL-2006-0008}, title = {Night Vision Goggle Standard Operating Procedures for the Ontario Ministry of Natural Resources}, year = {2006}}
@techreport{Macuda:2006kh, author = {Macuda, T. and Craig, G. and Erdos, R. and Carignan, S. and Jennings, S. and Swail, C. and Schnell, T. and Poolman, P. and Allison, R.S. and Lenert, A.}, date-modified = {2016-01-03 03:28:46 +0000}, institution = {NRC Institute for Aerospace Research, National Research Council Canada}, keywords = {Neural Avionics}, note = {NRC Institute for Aerospace Research FRL-2006-0050}, number = {FRL-2006-0050}, title = {Neural Avionics: Development of Airborne Neural Recording Capabilities in Fixed and Rotary Wing Aircraft to Monitor Pilot Mental State}, year = {2006}}
@article{allison20051003-11, abstract = {The sign of an accommodative response is provided by differences in chromatic aberration between under- and over-accommodated images. We asked whether these differences enable people to judge the depth order of two stimuli in the absence of other depth cues. Two vertical edges separated by an illuminated gap were presented at random relative distances. Exposure was brief, or prolonged with fixed or changing accommodation. The gap was illuminated with tungsten light or monochromatic light. Subjects could detect image blur with brief exposure for both types of light. But they could detect depth order only in tungsten light with long exposure, with or without changes in accommodation. [All rights reserved Elsevier]}, author = {Nguyen, V. A. and Howard, I. P. and Allison, R.S.}, date-modified = {2011-05-10 14:46:26 -0400}, doi = {10.1016/j.visres.2004.10.015}, journal = {Vision Research}, keywords = {Depth perception}, number = {8}, pages = {1003-11}, title = {Detection of the depth order of defocused images}, volume = {45}, year = {2005}, url-1 = {https://doi.org/10.1016/j.visres.2004.10.015}}
@article{allison20051025-32, abstract = {In order to measure the perceived direction of {`}up{'}, subjects judged the three-dimensional shape of disks shaded to be compatible with illumination from particular directions. By finding which shaded disk appeared most convex, we were able to infer the perceived direction of illumination. This provides an indirect measure of the subject's perception of the direction of {`}up{'}. The different cues contributing to this percept were separated by varying the orientation of the subject and the orientation of the visual background relative to gravity. We also measured the effect of decreasing or increasing gravity by making these shape judgements throughout all the phases of parabolic flight (0 g, 2 g and 1 g during level flight). The perceived up direction was modeled by a simple vector sum of {`}up{'} defined by vision, the body and gravity. In this model, the weighting of the visual cue became negligible under microgravity and hypergravity conditions.}, author = {Jenkin, H. L. and Dyde, R. T. and Zacher, J. E. and Zikovitz, D. C. and Jenkin, M. R. and Allison, R.S. and Howard, I. P. and Harris, L. R.}, date-modified = {2012-07-02 19:10:02 -0400}, doi = {10.1016/j.actaastro.2005.01.030}, journal = {Acta Astronautica}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {9-12}, pages = {1025-32}, title = {The relative role of visual and non-visual cues in determining the perceived direction of {`}up{'}: experiments in parabolic flight}, volume = {56}, year = {2005}, url-1 = {https://doi.org/10.1016/j.actaastro.2005.01.030}}
@incollection{Zacher:2005ap, author = {Zacher, J. E. and Allison, R.S. and Howard, I. P.}, booktitle = {ICVC Abstracts. International Conference on Visual Coding}, date-added = {2011-05-09 11:35:19 -0400}, date-modified = {2011-05-18 16:20:41 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {2}, pages = {6}, title = {The integration of movement on postrotatory nystagmus and illusory body rotation}, volume = {95}, year = {2005}}
@incollection{Sakano:2005fd, address = {Toronto, Canada}, author = {Sakano, Y. and Allison, R.S. and Howard, I.P.}, booktitle = {CVR Vision Conference 2005, Computational Vision in Neural and Machine Systems}, date-added = {2011-05-09 11:20:50 -0400}, date-modified = {2011-05-18 16:06:11 -0400}, keywords = {Motion in depth}, month = {06}, organization = {Centre for Vision Research, York University}, title = {Motion aftereffect in depth induced by motion in depth based on binocular cues}, year = {2005}}
@incollection{Palmisano:2005gp, author = {Palmisano, S.A. and Allison, R.S. and Howard, I.P.}, booktitle = {Australian Journal of Psychology}, date-added = {2011-05-06 16:57:48 -0400}, date-modified = {2011-05-18 16:08:49 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {Perceptions of Illusory Shearing in the Tumbling Room}, url-1 = {http://onlinelibrary.wiley.com/doi/10.1080/00049530600940005/pdf}, year = {2005}}
@incollection{Allison:2005lj, abstract = {Purpose. Perception of motion-defined form from apparent motion depends on the ability to detect and segregate regions of coherent motion. We investigated the effect of superimposed luminance noise on the ability to detect motion-defined form. Methods. Stimuli consisted of randomly textured squares that subtended 8.6 degrees of visual angle. The image sequences depicted either a motion-defined square subtending 4.3 degrees (the `target') or only the moving background. If the difference in the speed between the foreground dots and the background dots exceeded a certain threshold, the form of the foreground was visible. The images were rendered in OpenGL at 100 Hz and displayed at 80\% contrast. Observers viewed the displays from 1.2 m with their head stabilized on a chin rest. For each trial, subjects were shown a pair of image sequences and required to indicate which sequence contained the target stimulus in a two-interval forced-choice procedure. Poisson distributed spatiotemporal image noise was added to both the background and foreground of the display. At each of a variety of stimulus speeds (20.1, 50.4, 100.7, 201.4, and 302.2 arc min/second), we measured detection threshold as a function of stimulus signal to noise ratio. Results and discussion. All subjects could easily detect the motion-defined forms in the absence of any superimposed noise. As the power of spatiotemporal noise was increased, subjects had increased difficulty detecting the target. The influence of added noise was most pronounced at the lowest and highest image speeds. These results will be discussed in terms of models of motion processing and in the context of the usability of enhanced vision displays under noisy conditions.}, author = {Allison, R.S. and Macuda, T. and Jennings, S. and Thomas, P. and Guterman, P. and Craig, G.}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-added = {2011-05-06 16:51:54 -0400}, date-modified = {2012-07-02 17:48:13 -0400}, doi = {10.1167/5.8.651}, keywords = {Night Vision}, number = {8}, organization = {Vision Sciences Society}, pages = {651a}, title = {Detection of motion-defined form in the presence of veiling noise}, volume = {5}, year = {2005}, url-1 = {https://doi.org/10.1167/5.8.651}}
@incollection{allison2005732-732, abstract = {Purpose. Lateral motion aftereffects (MAEs) have been studied extensively. Less is known about MAEs in depth. We investigated whether adaptation to stimuli moving in depth induces MAEs in depth. Methods. The adaptation stimulus consisted of two frontoparallel planes, depicted by random-element stereograms, one above and one below the fixation point. The two planes repeatedly moved in depth in opposite directions for 2 minutes. The motion-in-depth was specified by interocular velocity differences and/or changing disparity by using the random elements which were spatially and temporally correlated in the two eyes (RDS), those which were spatially uncorrelated but temporally correlated (URDS), or those which were spatially correlated but temporally uncorrelated (DRDS). The test stimulus consisted of a RDS, URDS, DRDS or monocularly viewed random elements that did not move in depth. The subject pressed a key when any apparent motion in depth of the test stimulus ceased. Results and discussion. Under some conditions the test stimulus appeared to move in depth in the direction opposite to that of the adaptation stimulus (negative MAE). Specifically, adaptation to motion-in-depth of RDS and URDS produced MAEs in many test stimuli, while adaptation to DRDS produced little or no MAE in most test stimuli. While further experimentation is required, this finding suggests that adaptation to interocular velocity differences produces substantial MAEs in depth, but that adaptation to changing disparity produces little or no MAE. Also, a monocular test stimulus showed a MAE in a diagonal direction in depth. The depth component of the MAE under monocular test conditions indicates that binocular processes are involved in generating MAEs in depth.}, author = {Sakano, Yuichi and Allison, Robert S. and Howard, Ian P.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 18:05:39 -0400}, doi = {10.1167/5.8.732}, journal = {Journal of Vision}, keywords = {Motion in depth}, number = {8}, pages = {732-732}, title = {Aftereffects of motion in depth based on binocular cues}, volume = {5}, year = {2005}, url-1 = {https://doi.org/10.1167/5.8.732}}
@inproceedings{allison200521-31, abstract = {When a bright light source is viewed through Night Vision Goggles (NVG), the image of the source can appear enveloped in a {``}halo{''} that is much larger than the {``}weak-signal{''} point spread function of the NVG. The halo phenomenon was investigated in order to produce an accurate model of NVG performance for use in psychophysical experiments. Halos were created and measured under controlled laboratory conditions using representative Generation III NVGs. To quantitatively measure halo characteristics, the NVG eyepiece was replaced by a CMOS imager. Halo size and intensity were determined from camera images as functions of point-source intensity and ambient scene illumination. Halo images were captured over a wide range of source radiances (7 orders of magnitude) and then processed with standard analysis tools to yield spot characteristics. The spot characteristics were analyzed to verify our proposed parametric model of NVG halo event formation. The model considered the potential effects of many subsystems of the NVG in the generation of halo: objective lens, photocathode, image intensifier, fluorescent screen and image guide. A description of the halo effects and the model parameters are contained in this work, along with a qualitative rationale for some of the parameter choices.}, address = {Orlando, FL}, author = {Thomas, P. J. and Allison, R.S. and Carr, P. and Shen, E. and Jennings, S. and Macuda, T. and Craig, G. and Hornsey, R.}, booktitle = {Helmet- and Head-Mounted Displays X: Technologies and Applications}, date-modified = {2012-07-02 22:24:47 -0400}, doi = {10.1117/12.602736}, editor = {Rash, C. E.}, keywords = {Night Vision}, pages = {21-31}, publisher = {SPIE-Int Soc Optical Engineering}, series = {Proceedings of the Society of Photo-Optical Instrumentation Engineers (SPIE)}, title = {Physical modeling and characterization of the halo phenomenon in night vision goggles}, volume = {5800}, year = {2005}, url-1 = {https://doi.org/10.1117/12.602736}}
@inproceedings{allison20051-8, abstract = {Perception of motion-defined form is important in operational tasks such as search and rescue and camouflage breaking. Previously, we used synthetic Aviator Night Vision Imaging System (ANVIS-9) imagery to demonstrate that the capacity to detect motion-defined form was degraded at low levels of illumination (see Macuda et al., 2004; Thomas et al., 2004). To validate our simulated NVG results, the current study evaluated observers' ability to detect motion-defined form through a real ANVIS-9 system. The image sequences consisted of a target (square) that moved at a different speed than the background, or only depicted the moving background. For each trial, subjects were shown a pair of image sequences and required to indicate which sequence contained the target stimulus. Mean illumination and hence image noise level was varied by means of Neutral Density (ND) filters placed in front of the NVG objectives. At each noise level, we tested subjects at a series of target speeds. With both real and simulated NVG imagery, subjects had increased difficulty detecting the target with increased noise levels, at both slower and higher target speeds. These degradations in performance should be considered in operational planning. Further research is necessary to expand our understanding of the impact of NVG-produced noise on visual mechanisms.}, address = {Orlando, FL, USA}, author = {Macuda, T. and Craig, G. and Allison, R.S. and Guterman, P. and Thomas, P. and Jennings, S.}, date-modified = {2012-07-02 22:26:09 -0400}, doi = {10.1117/12.602590}, keywords = {Night Vision}, pages = {1-8}, publisher = {SPIE-Int. Soc. Opt. Eng}, series = {Proc. SPIE - Int. Soc. Opt. Eng. (USA)}, title = {Detection of motion-defined form using night vision goggles}, volume = {5800}, year = {2005}, url-1 = {https://doi.org/10.1117/12.602590}}
@inproceedings{allison200532-9, abstract = {Several methodologies have been used to determine resolution acuity through Night Vision Goggles. The present study compared NVG acuity estimates derived from the Hoffman ANV-126 and a standard psychophysical grating acuity task. For the grating acuity task, observers were required to discriminate between horizontal and vertical gratings according to a method of constant stimuli. Psychometric functions were generated from the performance data, and acuity thresholds were interpolated at a performance level of 70\% correct. Acuity estimates were established at three different illumination levels (0.06 to $5 \times 10^{-4}$ lux) for both procedures. These estimates were then converted to an equivalent Snellen value. The data indicate that grating acuity estimates were consistently better (i.e. lower scores) than acuity measures obtained from the Hoffman ANV-126. Furthermore significant differences in estimated acuity were observed using different tube technologies. In keeping with previous acuity investigations, although the Hoffman ANV-126 provides a rapid operational assessment of tube acuity, it is suggested that more rigorous psychophysical procedures such as the grating task described here be used to assess the real behavioural resolution of tube technologies.}, address = {Orlando, FL, USA}, author = {Macuda, T. and Allison, R.S. and Thomas, P. and Truong, L. and Tang, D. and Craig, G. and Jennings, S.}, date-modified = {2012-07-02 22:25:27 -0400}, doi = {10.1117/12.602598}, keywords = {Night Vision}, pages = {32-9}, publisher = {SPIE-Int. Soc. Opt. Eng}, series = {Proc. SPIE - Int. Soc. Opt. Eng. (USA)}, title = {Comparison of three night vision intensification tube technologies on resolution acuity: results from Grating and Hoffman ANV-126 tasks}, volume = {5800}, year = {2005}, url-1 = {https://doi.org/10.1117/12.602598}}
@inproceedings{allison200540-44, abstract = {Anecdotal reports by pilots flying with Night Vision Goggles (NVGs) in urban environments suggest that halos produced by bright light sources impact flight performance. The current study developed a methodology to examine the impact of viewing distance on perceived halo size. This was a first step in characterizing the subtle phenomenon of halo. Observers provided absolute size estimates of halos generated by a red LED at several viewing distances. Physical measurements of these halos were also recorded. The results indicated that the perceived halo linear size decreased as viewing distance was decreased. Further, the data showed that halos subtended a constant visual angle on the goggles (1°48', ±7') irrespective of distance up to 75'. This invariance with distance may impact pilot visual performance. For example, the counterintuitive apparent contraction of halo size with decreasing viewing distance may impact estimates of closure rates and of the spatial layout of light sources in the scene. Preliminary results suggest that halo is a dynamic phenomenon that requires further research to characterize the specific perceptual effects that it might have on pilot performance.}, address = {Orlando, FL, United States}, author = {Craig, Greg and Macuda, Todd and Thomas, Paul and Allison, Rob and Jennings, Sion}, date-modified = {2011-09-12 22:10:58 -0400}, doi = {10.1117/12.602543}, keywords = {Night Vision}, pages = {40-44}, publisher = {International Society for Optical Engineering, Bellingham WA, WA 98227-0010}, series = {Proceedings of SPIE - The International Society for Optical Engineering}, title = {Light source halos in Night Vision Goggles: Psychophysical assessments}, volume = {5800}, year = {2005}, url-1 = {https://doi.org/10.1117/12.602543}}
@article{allison2004305-313, abstract = {Over what region of space are horizontal disparities integrated to form the stimulus for vergence? The vergence system might be expected to respond to disparities within a small area of interest to bring them into the range of precise stereoscopic processing. However, the literature suggests that disparities are integrated over a fairly large parafoveal area. We report the results of six experiments designed to explore the spatial characteristics of the stimulus for vergence. Binocular eye movements were recorded using magnetic search coils. Each dichoptic display consisted of a central target stimulus that the subject attempted to fuse, and a competing stimulus with conflicting disparity. In some conditions the target was stationary, providing a fixation stimulus. In other conditions, the disparity of the target changed to provide a vergence-tracking stimulus. The target and competing stimulus were combined in a variety of conditions including those in which (1) a transparent textured-disc target was superimposed on a competing textured background, (2) a textured-disc target filled the centre of a competing annular background, and (3) a small target was presented within the centre of a competing annular background of various inner diameters. In some conditions the target and competing stimulus were separated in stereoscopic depth. The results are consistent with a disparity integration area with a diameter of about $5^{\circ}$. Stimuli beyond this integration area can drive vergence in their own right, but they do not appear to be summed or averaged with a central stimulus to form a combined disparity signal. A competing stimulus had less effect on vergence when separated from the target by a disparity pedestal. As a result, we propose that it may be more useful to think in terms of an integration volume for vergence rather than a two-dimensional retinal integration area.}, author = {Allison, R.S. and Howard, I. P. and Fang, X. P.}, date-modified = {2012-07-02 19:10:51 -0400}, doi = {10.1007/s00221-003-1790-0}, journal = {Experimental Brain Research}, keywords = {Stereopsis}, number = {3}, pages = {305-313}, title = {The stimulus integration area for horizontal vergence}, volume = {156}, year = {2004}, url-1 = {https://doi.org/10.1007/s00221-003-1790-0}}
@incollection{Nguyen:2004qw, abstract = {Purpose: An object nearer or further than the fixation point produces a blurred image. The magnitude and direction (sign) of image blur guides accommodation but it could also provide information about relative depth. We examined the contributions of active accommodation and stationary image blur to the precision of monocular judgments of relative depth. Methods: The test stimulus consisted of two vertical sharp edges presented to the right eye. The right edge was set at each of eleven distances, in front or behind the fixed left edge. The lateral distance between the edges was jittered about a mean value of 4 mm to minimise parallactic depth cue. Subjects prefixated coplanar edges at the fixed edge's distance. The test was presented replacing the fixation stimulus. In one condition, the test remained until the subject responded. In a second condition, the test appeared for only 0.2 sec, too short a period for accommodation. Subjects made forced-choice judgements of the depth order of the edges. Depth acuity thresholds were obtained by the method of constant stimuli. Results: The proportion of correct responses was plotted against the depth between the two edges. For long exposures, mean depth acuity (80\% correct responses) was 4 cm at a viewing distance of 37 cm. The threshold was higher in the short-duration condition. Conclusion: Monocular depth acuity from image blur was better than previously reported. Reasonably precise relative depth judgments were obtained by actively focussing between stimuli at different distances. Judgments were less precise when based on instantaneous blur.}, author = {Nguyen, V. A. and Howard, I. P. and Allison, R.S.}, booktitle = {Vision Sciences Society Annual Meeting, Journal of Vision}, date-added = {2011-05-06 17:00:03 -0400}, date-modified = {2012-07-02 18:00:35 -0400}, doi = {10.1167/4.8.461}, keywords = {Depth perception}, number = {8}, organization = {Vision Sciences Society}, pages = {461a}, title = {The contribution of image blur to depth perception}, volume = {4}, year = {2004}, url-1 = {https://doi.org/10.1167/4.8.461}}
@incollection{Nguyen:2004zp, address = {Washington, DC}, author = {Nguyen, V. and Howard, I. P. and Allison, R.S.}, booktitle = {Program Society for Neuroscience Annual Meeting, No. 865.17. 2004 Abstract Viewer/Itinerary Planner}, date-added = {2011-05-06 16:58:33 -0400}, date-modified = {2011-05-18 16:08:18 -0400}, keywords = {Depth perception}, organization = {Society for Neuroscience}, title = {Object blur and monocular depth perception}, year = {2004}}
@incollection{allison200457-57, abstract = {The sign of an accommodative response is provided by differences in chromatic aberration between under- and over-accommodated images. We asked whether these differences enable people to judge the depth order of two stimuli in the absence of other depth cues. Two vertical test edges separated laterally by an illuminated gap were presented to one eye with one edge at random distances relative to the other fixed edge. The fixed edge was at the same distance as two coplanar pre-fixation edges. In one condition, exposure duration was brief so that accommodation could not change. In other conditions exposure was prolonged and subjects either continued to fixate the fixed edge or changed their accommodation between the two test edges. The gap was illuminated with tungsten light or monochromatic light. Subjects could detect image blur of about 0.3 D with brief exposure for both types of light. However, they could detect depth order only in tungsten light with long exposure, with or without changes in accommodation.}, author = {Howard, Ian P. and Nguyen, Vincent A. and Allison, Robert S.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:58:50 -0400}, doi = {10.1167/4.11.57}, journal = {Journal of Vision}, keywords = {Depth perception}, number = {11}, pages = {57-57}, title = {Detection of depth order from chromatic aberration of defocused images}, volume = {4}, year = {2004}, url-1 = {https://doi.org/10.1167/4.11.57}}
@incollection{allison2004146-146, author = {Allison, R.S. and Schumacher, J. and Herpers, R.}, booktitle = {Perception}, date-modified = {2011-09-12 21:58:56 -0400}, journal = {Perception}, keywords = {Eye Movements & Tracking}, pages = {146-146}, title = {Saccadic suppression of motion of the entire visual field}, url-1 = {http://www.perceptionweb.com/abstract.cgi?id=v040564}, volume = {33}, year = {2004}}
@inproceedings{Yan:2004ti, abstract = {In CAVE-like environments human locomotion is significantly restricted due to physical space and configural constraints. Interaction techniques based upon stepping in place have been suggested as a way to simulate long range locomotion. We describe a new method for step detection and estimation of forward walking speed and direction in an immersive virtual environment. To calibrate our system and to help in the modeling of the stepping behaviour, we collected motion capture data during real locomotion down a hallway while walking at different freely selected speeds, from very slow to very fast. From this data, the empirical relation between the forward speed of real walking and the trajectory of the leg motion during stepping was established. A simple model of stepping motion was fit for individual subjects. The model was used to estimate forward walking speed and direction from step characteristics during walking in place in a six-walled virtual environment. The system provides natural and effective simulated gait for interaction and travel within the virtual environment and provides the ability to study human locomotion and navigation in a CAVE-like environment.}, address = {Ames, Iowa}, author = {Yan, L. and Allison, R.S. and Rushton, S.K.}, booktitle = {8th Annual Immersive Projection Technology (IPT) Symposium}, date-added = {2011-05-06 13:30:27 -0400}, date-modified = {2011-05-18 16:07:33 -0400}, keywords = {Augmented & Virtual Reality}, month = {May 13th-14th}, read = {1}, title = {New Simple Virtual Walking Method - Walking on the Spot}, url-1 = {https://percept.eecs.yorku.ca/papers/New_Simple_Virtual_Walking_Method.pdf}, year = {2004}}
@inproceedings{Huang:2004kk, abstract = {Real-time gaze tracking is a promising interaction technique for virtual environments. Immersive projection-based virtual reality systems such as the CAVE™ allow users a wide range of natural movements. Unfortunately, most head and eye movement measurement techniques are of limited use during free head and body motion. An improved head-eye tracking system is proposed and developed for use in immersive applications with free head motion. The system is based upon a head-mounted video-based eye tracking system and a hybrid ultrasound-inertial head tracking system. The system can measure the point of regard in a scene in real-time during relatively large head movements. The system will serve as a flexible testbed for evaluating novel gaze-contingent interaction techniques in virtual environments. The calibration of the head-eye tracking system is one of the most important issues that need to be addressed. In this paper, a simple view-based calibration method is proposed.}, address = {Seoul, Korea}, author = {Huang, H. and Allison, R.S. and Jenkin, M.R.M.}, booktitle = {ICAT'2004 14th International Conference on Artificial Reality and Telexistence}, date-added = {2011-05-06 13:26:18 -0400}, date-modified = {2011-05-18 15:45:14 -0400}, keywords = {Eye Movements & Tracking}, month = {November 30th - December 2nd}, title = {Combined Head-Eye Tracking for Immersive Virtual Reality}, url-1 = {https://percept.eecs.yorku.ca/papers/icat2004.pdf}, year = {2004}}
@inproceedings{allison200425-35, abstract = {Night vision devices are important tools that extend the operational capability of military and civilian flight operations. Although these devices enhance some aspects of night vision, they distort or degrade other aspects. Scintillation of the NVG signal at low light levels is one of the parameters that may affect pilot performance. We have developed a parametric model of NVG image scintillation. Measurements were taken of the output of a representative NVG at low light levels to validate the model and refine the values of the embedded parameters. A simple test environment was created using a photomultiplier and an oscilloscope. The model was used to create sequences of simulated NVG imagery that were characterized numerically and compared with measured NVG signals. The sequences of imagery are intended for use in laboratory experiments on depth and motion-in-depth perception.}, address = {Orlando, FL, USA}, author = {Thomas, P. J. and Allison, R.S. and Jennings, S. and Yip, K. and Savchenko, E. and Tsang, I. and Macuda, T. and Hornsey, R.}, booktitle = {Helmet and Head-Mounted Displays IX: Technologies and Applications}, date-modified = {2012-07-02 22:06:44 -0400}, doi = {10.1117/12.542618}, editor = {Rash, C. E. and Reese, C. E.}, keywords = {Night Vision}, pages = {25-35}, publisher = {SPIE-Int. Soc. Opt. Eng}, series = {Proc. SPIE - Int. Soc. Opt. Eng. (USA)}, title = {Validation of synthetic imagery for night vision devices}, volume = {5442}, year = {2004}, url-1 = {https://doi.org/10.1117/12.542618}}
@inproceedings{allison2004353-364, abstract = {A concept is described for the detection and location of transient objects, in which a {``}pixel-binary{''} CMOS imager is used to give a very high effective frame rate for the imager. The sensitivity to incoming photons is enhanced by the use of an image intensifier in front of the imager. For faint signals and a high enough frame rate, a single {``}image{''} typically contains only a few photon or noise events. Only the event locations need be stored, rather than the full image. The processing of many such {``}fast frames{''} allows a composite image to be created. In the composite image, isolated noise events can be removed, photon shot noise effects can be spatially smoothed and moving objects can be de-blurred and assigned a velocity vector. Expected objects can be masked or removed by differencing methods. In this work, the concept of a combined image intensifier/CMOS imager is modeled. Sensitivity, location precision and other performance factors are assessed. Benchmark measurements are used to validate aspects of the model. Options for a custom CMOS imager design concept are identified within the context of the benefits and drawbacks of commercially available night vision devices and CMOS imagers.}, address = {Ottawa, Canada}, author = {Thomas, P. J. and Allison, R.S. and Hornsey, R. and Wong, W.}, booktitle = {Photonics North: Applications of Photonic Technology 7b, Pts 1 and 2 - Closing the Gap between Theory, Development, and Application - Photonic Applications in Astronomy, Biomedicine, Imaging, Materials Processing, and Education}, date-modified = {2012-07-02 22:29:10 -0400}, doi = {10.1117/12.567712}, editor = {Armitage, J. C. and Lessard, R. A. and Lampropoulos, G. A.}, keywords = {Night Vision}, pages = {353-364}, publisher = {SPIE-Int Soc Optical Engineering}, series = {Proceedings of the Society of Photo-Optical Instrumentation Engineers (SPIE)}, title = {Concept for image intensifier with CMOS imager output interface}, volume = {5578}, year = {2004}, url-1 = {https://doi.org/10.1117/12.567712}}
@inproceedings{allison200417-24, address = {Grenoble, France}, author = {Schumacher, J. and Allison, R.S. and Herpers, R.}, booktitle = {Eurographics/{ACM} {SIGGRAPH} Symposium on Virtual Environments}, date-modified = {2011-05-11 13:32:42 -0400}, keywords = {Eye Movements & Tracking}, pages = {17-24}, title = {Using saccadic suppression to hide graphic updates}, url-1 = {https://percept.eecs.yorku.ca/papers/Schumacher-Using_Saccadic_Suppression.pdf}, url-2 = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.3306}, year = {2004}}
@inproceedings{allison200436-44, abstract = {The influence of Night Vision Goggle-produced noise on the perception of motion-defined form was investigated using synthetic imagery and standard psychophysical procedures. Synthetic image sequences incorporating synthetic noise were generated using a software model developed by our research group. This model is based on the physical properties of the Aviator Night Vision Imaging System (ANVIS-9) image intensification tube. The image sequences either depicted a target that moved at a different speed than the background, or only depicted the background. For each trial, subjects were shown a pair of image sequences and required to indicate which sequence contained the target stimulus. We tested subjects at a series of target speeds at several realistic noise levels resulting from varying simulated illumination. The results showed that subjects had increased difficulty detecting the target with increased noise levels, particularly at slower target speeds. This study suggests that the capacity to detect motion-defined form is degraded at low levels of illumination. Our findings are consistent with anecdotal reports of impaired motion perception in NVGs. Perception of motion-defined form is important in operational tasks such as search and rescue and camouflage breaking. These degradations in performance should be considered in operational planning.}, address = {Orlando, FL, United States}, author = {Macuda, Todd and Allison, Robert S. and Thomas, Paul and Craig, Greg and Jennings, Sion}, date-modified = {2012-07-02 22:28:38 -0400}, doi = {10.1117/12.542633}, keywords = {Night Vision}, pages = {36-44}, publisher = {International Society for Optical Engineering, Bellingham, WA}, series = {Proceedings of SPIE - The International Society for Optical Engineering}, title = {Detection of motion-defined form under simulated night vision conditions}, volume = {5442}, year = {2004}, url-1 = {https://doi.org/10.1117/12.542633}}
@inproceedings{allison200422-29, abstract = {This paper describes a hybrid optical-inertial tracking technology for fully-immersive projective displays. In order to track the operator, the operator wears a 3DOF commercial inertial tracking system coupled with a set of laser diodes arranged in a known configuration. The projection of this laser constellation on the display walls is tracked visually to compute the 6DOF absolute head pose of the user. The absolute pose is combined with the inertial tracker data using an extended Kalman filter to maintain a robust estimate of position and orientation. This paper describes the basic tracking system including the hardware and software infrastructure.}, address = {London, ON}, author = {Hogue, A. and Jenkin, M. R. and Allison, R.S.}, booktitle = {1st Canadian Conference on Computer and Robot Vision, Proceedings}, date-modified = {2011-05-11 13:26:13 -0400}, doi = {10.1109/CCCRV.2004.1301417}, keywords = {Augmented & Virtual Reality}, pages = {22-29}, publisher = {IEEE Computer Soc, Los Alamitos}, title = {An optical-inertial tracking system for fully-enclosed VR displays}, year = {2004}, url-1 = {https://doi.org/10.1109/CCCRV.2004.1301417}}
@inproceedings{allison200477-82, abstract = {During visual exploration of a scene human beings can be insensitive to even large changes in the scene when the eye is executing rapid or saccadic eye movements. In this contribution, this period of saccadic suppression was exploited to hide graphics updates in immersive environments. Two experiments were conducted. In the first experiment the general sensitivity of observers to trans-saccadic translations of large images of complex natural scenes was studied. It was found that trans-saccadic changes of up to 1.2 degrees of visual angle were seldom noticed during saccades with durations of at least 66 ms. In the second experiment, the perceived magnitude of trans-saccadic translation was compared to the perceived magnitude of image translation when no saccade was performed to determine the point of subjective equality. It was found that trans-saccadic displacements were perceived as approximately half as big as equivalent-sized inter-saccadic displacements.}, author = {Herpers, R. and Schumacher, J. and Allison, R.S.}, booktitle = {Dynamic Perception 2004}, date-modified = {2011-05-11 13:32:43 -0400}, editor = {Ilg, U.J.}, keywords = {Eye Movements & Tracking}, pages = {77-82}, publisher = {Infix/IOS Press BV, Amsterdam}, title = {Hiding graphic updates during long saccadic suppression periods}, url-1 = {https://percept.eecs.yorku.ca/papers/Herpers-Hiding-graphic-ups.pdf}, year = {2004}}
@inproceedings{allison2004375-381, abstract = {The incorporation of haptic interfaces into collaborative virtual environments is challenging when the users are geographically distributed. Reduction of latency is essential for maintaining realism, causality and the sense of co-presence in collaborative virtual environments during closely-coupled haptic tasks. In this study we consider the effects of varying amounts of simulated constant delay on the performance of a simple collaborative haptic task. The task was performed with haptic feedback alone or with visual feedback alone. Subjects were required to make a coordinated movement of their haptic displays as rapidly as possible, while maintaining a target simulated spring force between their end effector and that of their collaborator. Increasing simulated delay resulted in a decrease in performance, both in deviation from the target spring force and in increased time to complete the task. At large latencies, there was evidence of dissociation between the states of the system that were observed by each of the collaborating users. This confirms earlier anecdotal evidence that users can essentially see qualitatively different simulations with typical long-distance network delays.}, address = {New York, NY 10036-5701, United States}, author = {Allison, Robert S. and Zacher, James E. and Wang, David and Shu, Joseph}, date-modified = {2012-01-19 19:52:56 -0500}, doi = {10.1145/1044588.1044670}, keywords = {Augmented & Virtual Reality}, pages = {375-381}, publisher = {Association for Computing Machinery}, series = {Proceedings {VRCAI} 2004 - {ACM} {SIGGRAPH} International Conference on Virtual Reality Continuum and its Applications in Industry}, title = {Effects of network delay on a collaborative motor task with telehaptic and televisual feedback}, year = {2004}, url-1 = {https://doi.org/10.1145/1044588.1044670}}
@inproceedings{allison2004167-178, abstract = {Convergence of the real or virtual stereoscopic cameras is an important operation in stereoscopic display systems. For example, convergence can shift the range of portrayed depth to improve visual comfort; can adjust the disparity of targets to bring them nearer to the screen and reduce accommodation-vergence conflict; or can bring objects of interest into the binocular field-of-view. Although camera convergence is acknowledged as a useful function, there has been considerable debate over the transformation required. It is well known that rotational camera convergence or 'toe-in' distorts the images in the two cameras producing patterns of horizontal and vertical disparities that can cause problems with fusion of the stereoscopic imagery. Behaviourally, similar retinal vertical disparity patterns are known to correlate with viewing distance and strongly affect perception of stereoscopic shape and depth. There has been little analysis of the implications of recent findings on vertical disparity processing for the design of stereoscopic camera and display systems. We ask how such distortions caused by camera convergence affect the ability to fuse and perceive stereoscopic images.}, address = {Bellingham}, author = {Allison, R.S.}, booktitle = {Stereoscopic Displays and Virtual Reality Systems XI}, date-modified = {2012-07-02 22:31:57 -0400}, doi = {10.1117/12.526278}, editor = {Woods, A. J. and Merritt, J. O. and Benton, S. A. and Bolas, M. T.}, keywords = {Stereopsis}, pages = {167-178}, publisher = {SPIE-Int Soc Optical Engineering}, series = {Proceedings of the Society of Photo-Optical Instrumentation Engineers (SPIE)}, title = {The camera convergence problem revisited}, volume = {5291}, year = {2004}, url-1 = {https://doi.org/10.1117/12.526278}}
@article{allison20031028, author = {Palmisano, S. and Burke, D. and Allison, R.S.}, date-modified = {2011-05-11 13:15:55 -0400}, journal = {Perception}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {8}, pages = {1028}, title = {Erratum: Coherent perspective jitter induces visual illusions of self-motion (Perception (2003) 32 (97-110))}, url-1 = {http://www.ncbi.nlm.nih.gov/pubmed/12613789}, volume = {32}, year = {2003}}
@article{allison200397-110, abstract = {Palmisano et al (2000 Perception 29 57-67) found that adding coherent perspective jitter to constant-velocity radial flow improved visually induced illusions of self-motion (vection). This was a surprising finding, because unlike pure radial flow, this jittering radial flow should have generated sustained visual--vestibular conflicts--previously thought to always reduce/impair vection. We attempted to ascertain the essential stimulus features for this jitter advantage for vection by examining three novel types of jitter display. While adding incoherent jitter to radial flow was found to impair vection, adding coherent non-perspective jitter had little effect on this subjective experience (contrary to the notion that jitter improves vection by reducing adaptation to radial flow). Importantly, we found that coherent perspective jitter not only improves the vection induced by radial flow, but it also appears to induce modest vection by itself (demonstrating that vection can still occur when there is an extreme mismatch between actual and expected vestibular activity). These results suggest that the previously demonstrated advantage for coherent perspective jitter was due (in part at least) to jittering vection combining with forwards vection in depth to produce a more compelling overall vection experience.}, author = {Palmisano, S. and Burke, D. and Allison, R.S.}, date-modified = {2012-07-02 19:13:08 -0400}, doi = {10.1068/p3468}, journal = {Perception}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {1}, pages = {97-110}, title = {Coherent perspective jitter induces visual illusions of self-motion}, volume = {32}, year = {2003}, url-1 = {https://doi.org/10.1068/p3468}}
@article{allison20031933-8, abstract = {PURPOSE: The scleral search coil is widely regarded as the gold standard measurement technique for eye movements. The effect of wearing scleral search coils on human vision has not been systematically studied. However, there are anecdotal reports of degraded visual acuity, mild eye irritation, and an increase in intraocular pressure (IOP). The current study was conducted to investigate the effect of scleral search coil use on visual acuity and ocular integrity. METHODS: Six subjects were examined; all had previously worn search coils. Two drops of topical anesthetic were administered before insertion of the coils. Coils were inserted by hand and secured by applying mild pressure. The coils were removed after 45 minutes or on request of either the subject or the clinician. Before, during (at 15-minute intervals), and after the coil was worn, the following measurements were taken for both eyes: tonometry (noncontact), corneal topography, biomicroscopic examination, visual acuity (monocular Snellen), and an eye-discomfort rating. RESULTS: Scleral coils produced a variety of effects, including ocular discomfort, hyperemia of the bulbar conjunctiva, increased IOP, buckling of the iris, grade 2 and 3 corneal staining, and reduction in visual acuity. Effects appeared as early as 15 minutes after insertion of the coils. All observed effects seemed to be transient and dissipated after coils were removed. CONCLUSIONS: Scleral coils may not be appropriate for all subjects. The findings suggest that there is a need for thorough subject prescreening and that clinicians should consider the risk/benefit ratio. Acute reduction in visual acuity may confound search coil findings. More research is needed to determine the maximum wearing time for properly screened subjects.}, author = {Irving, E. L. and Zacher, J. E. and Allison, R.S. and Callender, M. G.}, date-modified = {2012-07-02 19:20:49 -0400}, doi = {10.1167/iovs.01-0926}, journal = {Invest Ophthalmol Vis Sci}, keywords = {Eye Movements & Tracking}, number = {5}, pages = {1933-8}, title = {Effects of scleral search coil wear on visual function}, volume = {44}, year = {2003}, url-1 = {https://doi.org/10.1167/iovs.01-0926}}
@article{allison20031879-93, abstract = {This paper examines and contrasts motion-parallax analogues of the induced-size and induced-shear effects with the equivalent induced effects from binocular disparity. During lateral head motion or with binocular stereopsis, vertical-shear and vertical-size transformations produced 'induced effects' of apparent inclination and slant that are not predicted geometrically. With vertical head motion, horizontal-shear and horizontal-size transformations produced similar analogues of the disparity induced effects. Typically, the induced effects were opposite in direction and slightly smaller in size than the geometric effects. Local induced-shear and induced-size effects could be elicited from motion parallax, but not from disparity, and were most pronounced when the stimulus contained discontinuities in velocity gradient. The implications of these results are discussed in the context of models of depth perception from disparity and structure from motion.}, author = {Allison, R.S. and Rogers, B. J. and Bradshaw, M. F.}, date-modified = {2011-05-10 14:57:34 -0400}, doi = {10.1016/S0042-6989(03)00298-0}, journal = {Vision Research}, keywords = {Stereopsis}, number = {17}, pages = {1879-93}, title = {Geometric and induced effects in binocular stereopsis and motion parallax}, volume = {43}, year = {2003}, url-1 = {https://doi.org/10.1016/S0042-6989(03)00298-0}}
@incollection{Huang:2003zn, author = {Huang, J. and Schumacher, J. and Allison, R.S. and Zacher, J.E.}, booktitle = {CRESTech Innovation Network 2003}, date-added = {2011-05-09 11:22:27 -0400}, date-modified = {2011-05-18 16:31:38 -0400}, keywords = {Augmented & Virtual Reality}, title = {What are you looking at?}, url-1 = {https://percept.eecs.yorku.ca/papers/Huang-What_are_you_looking_at.pdf}, year = {2003}}
@inproceedings{allison2003179-87, abstract = {Six-sided fully immersive projective displays present complex and novel problems for tracking systems. Existing tracking technologies typically require tracking equipment that is placed in locations or attached to the user in a way that is suitable for typical displays of five or fewer walls but which would interfere with the immersive experience within a fully enclosed display. This paper presents a vision-based tracking technology for fully-immersive projective displays. The technology relies on the operator wearing a set of laser diodes arranged in a specific configuration and then visually tracking the projection of these lasers on the external walls of the display outside of the user's view. This approach places minimal hardware on the user and no visible tracking equipment is placed within the immersive environment. This paper describes the basic visual tracking system including the hardware and software infrastructure.}, address = {Zurich, Switzerland}, author = {Hogue, A. and Robinson, M. and Jenkin, M. R. and Allison, R.S.}, date-modified = {2011-05-11 13:26:13 -0400}, doi = {10.1145/769953.769974}, keywords = {Augmented & Virtual Reality}, pages = {179-87}, publisher = {Eurographics Assoc}, series = {IPT/EGVE 2003. Seventh Immersive Projection Technology Workshop. Ninth Eurographics Workshop on Virtual Environments}, title = {A vision-based head tracking system for fully immersive displays}, year = {2003}, url-1 = {https://doi.org/10.1145/769953.769974}}
@article{allison20021071, author = {Palmisano, S. and Allison, R.S. and Howard, I. P.}, date-modified = {2012-07-02 19:29:45 -0400}, doi = {10.1016/S0042-6989(02)00046-9}, journal = {Vision Research}, keywords = {Stereopsis}, number = {8}, pages = {1071}, title = {Erratum: Effects of horizontal and vertical additive disparity noise on stereoscopic corrugation detection (Vision Research (2001) 41 (3133-3143) PII: S0042698901001833)}, volume = {42}, year = {2002}, url-1 = {https://doi.org/10.1016/S0042-6989(02)00046-9}}
@article{allison200275-85, abstract = {When people move there are many visual and non-visual cues that can inform them about their movement. Simulating self-motion in a virtual reality environment thus needs to take these non-visual cues into account in addition to the normal high-quality visual display. Here we examine the contribution of visual and non-visual cues to our perception of self-motion. The perceived distance of self-motion can be estimated from the visual flow field, physical forces or the act of moving. On its own, passive visual motion is a very effective cue to self-motion, and evokes a perception of self-motion that is related to the actual motion in a way that varies with acceleration. Passive physical motion turns out to be a particularly potent self-motion cue: not only does it evoke an exaggerated sensation of motion, but it also tends to dominate other cues.}, author = {Harris, L. R. and Jenkin, M. R. and Zikovitz, D. and Redlick, F. and Jaekl, P. and Jasiobedzka, U. T. and Jenkin, H. L. and Allison, R.S.}, date-modified = {2012-07-02 19:14:32 -0400}, doi = {10.1007/s100550200008}, journal = {Virtual Reality}, keywords = {Augmented & Virtual Reality}, number = {2}, pages = {75-85}, title = {Simulating self-motion I: cues for the perception of motion}, volume = {6}, year = {2002}, url-1 = {https://doi.org/10.1007/s100550200008}}
@article{allison200286-95, abstract = {When simulating self-motion, virtual reality designers ignore non-visual cues at their peril. But providing non-visual cues presents significant challenges. One approach is to accompany visual displays with corresponding real physical motion to stimulate the non-visual, motion-detecting sensory systems in a natural way. However, allowing real movement requires real space. Technologies such as head mounted displays (HMDs) and CAVE™ can be used to provide large immersive visual displays within small physical spaces. It is difficult, however, to provide virtual environments that are as large physically as they are visually. A fundamental problem is that tracking technologies that work well in a small, enclosed environment do not function well over longer distances. Here we describe Trike - a `rideable' computer system that can be used to present large virtual spaces both visually and physically, and thus provide appropriately matched stimulation to both visual and non-visual sensory systems.}, author = {Allison, R.S. and Harris, L. R. and Hogue, A. R. and Jasiobedzka, U. T. and Jenkin, H. L. and Jenkin, M. R. and Jaekl, P. and Laurence, J. R. and Pentile, G. and Redlick, F. and Zacher, J. and Zikovitz, D.}, date-modified = {2012-07-02 19:14:08 -0400}, doi = {10.1007/s100550200009}, journal = {Virtual Reality}, keywords = {Augmented & Virtual Reality}, number = {2}, pages = {86-95}, title = {Simulating self-motion II. A virtual reality tricycle}, volume = {6}, year = {2002}, url-1 = {https://doi.org/10.1007/s100550200009}}
@inbook{Allison:2002lc, address = {Toronto, Canada}, author = {Allison, R.S. and Howard, I.P.}, booktitle = {Seeing in Depth Volume {I}: {B}asic Mechanisms}, date-added = {2011-05-06 11:24:55 -0400}, date-modified = {2012-07-02 20:46:33 -0400}, doi = {10.1093/acprof:oso/9780195367607.001.0001}, editor = {I. Howard}, keywords = {Depth perception}, pages = {263-274}, publisher = {I. Porteous/Oxford}, title = {Models of Disparity Detectors}, year = {2002}, url-1 = {https://doi.org/10.1093/acprof:oso/9780195367607.001.0001}}
@incollection{Hogue:2002xc, address = {Toronto, Canada}, author = {Hogue, A. and Jenkin, M. and Allison, R.S. and Robinson, M. and Laurence, J. and Zacher, J.}, booktitle = {CRESTech Innovation Networking Conference}, date-added = {2011-05-09 14:14:34 -0400}, date-modified = {2011-09-12 21:48:40 -0400}, keywords = {Augmented & Virtual Reality}, month = {10}, title = {MARVIN: a Mobile Automatic Realtime Visual and INertial tracking system}, year = {2002}}
@incollection{Jaekl:2002ia, abstract = {Translation and rotation are detected by different patterns of optic flow and by different divisions of the vestibular system. A given movement (e.g. yaw rotation or up/down translation) involves different sensors depending on the orientation of the movement with respect to gravity. Here we assess the contribution of these different sense systems to the ``whole system'' response to self motion. Our subjects' task was to distinguish self-produced from external visual motion during rotation around the yaw, pitch and roll axes and during translation in the x (naso-occipital), y (sideways) and z (up and down) directions. The axis or direction of motion was parallel or orthogonal to the direction of gravity. Subjects wore a helmet-mounted display whose position was monitored by a mechanical head tracker with minimal lag. The visual display was modified in response to head movement. The ratio between head and image motion was varied randomly using the method of constant stimuli. Subjects indicated whether the display appeared earth-stationary or not. For both rotation and translation there was a large range of ratios that was tolerated as perceptually stable. The ratio most likely to be accepted as stable corresponded to visual motion being faster than head motion. For rotation there were no consistent differences between yaw, pitch or roll axes and the orientation of the axis relative to gravity also had no effect. For translation, motion in the x direction was on average matched with less visual motion than y or z motion. Although there was no consistent effect of whether motion was parallel or orthogonal to gravity, posture relative to gravity did have an effect.}, author = {Jaekl, P. M. and Allison, R.S. and Harris, L. R. and Jenkin, H. L. and Jenkin, M. R. and Zacher, J. E. and Zikovitz, D. C.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:59:29 -0400}, doi = {10.1167/2.7.508}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {7}, pages = {508}, title = {Judging perceptual stability during active rotation and translation in various orientations}, volume = {2}, year = {2002}, url-1 = {https://doi.org/10.1167/2.7.508}}
@incollection{Harris:2002zn, abstract = {INTRODUCTION. Self movement generates retinal movement that is perceptually distinct from other movement. There are two types of models for how this distinction might be achieved. In the first, after self motion is detected, an internal estimate of the expected retinal movement is subtracted (a linear process) from retinal image movement. Remaining movement is interpreted as indicating external movement. In the second model, subjects internally compare observed visual motion with their internal representation: a non-linear ratio judgement which depends on the magnitude of the expected movement. A discriminable difference indicates external movement. These models respectively predict linear and non-linear distributions of the probability of regarding a given retinal motion as perceptually stable. METHODS. Our subjects' task was to distinguish self-produced from external visual motion during rotation around the yaw, pitch and roll axes and during translation in the x, y and z directions. They wore a helmet-mounted display whose position was monitored by a mechanical head tracker with minimal lag. The visual display was modified in response to head movement. The ratio between head and image motion was adjusted by the subject until the display appeared earth-stationary. RESULTS. The distributions of ratios judged to be perceptually stable were fitted with a normal and a log normal distribution. For the rotation data a better fit was found using the log normal distribution, suggesting that the non-linear ratio model is a better description of the underlying neural computations involved. No clear difference was found for the translation data.}, author = {Harris, L. R. and Allison, R.S. and Jaekl, P. M. and Jenkin, H. L. and Jenkin, M. R. and Zacher, J. E. and Zikovitz, D. C.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:59:00 -0400}, doi = {10.1167/2.7.509}, journal = {Journal of Vision}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {7}, pages = {509}, title = {Extracting self-created retinal motion}, volume = {2}, year = {2002}, url-1 = {https://doi.org/10.1167/2.7.509}}
@incollection{allison2002661-661, abstract = {Ogle's induced-size effect refers to the percept of slant elicited by a difference in vertical size between the left and right half images of a stereoscopic display. The effect is not readily predicted by the geometry of the situation and has been of considerable interest in the stereoscopic literature. Rogers and Koenderink (Nature, 322: 62-63) demonstrated that modulation of the vertical size of a monocular image during lateral head motion produces the impression of a surface slanted in depth - a motion-parallax analogue of the induced-size effect. We investigated motion parallax analogues of the induced-size and induced-shear effects further and compared them with the corresponding stereoscopic versions. During lateral head motion or with binocular stereopsis, vertical-shear and vertical-size transformations produced 'induced effects' of apparent inclination and slant that are not predicted geometrically. With vertical head motion, horizontal-shear and horizontal-size transformations produced similar analogues of the disparity induced effects. Typically, the induced effects were opposite in direction and slightly smaller than the geometric effects. For both stereopsis and motion parallax, relative slant and inclination were more pronounced when the stimulus contained discontinuities in disparity/velocity gradient than for continuous disparity/flow fields. The results have important implications for the processing of disparity and optic flow fields. The support of the McDonnell-Pew Centre for Cognitive Neuroscience is greatly appreciated.}, author = {Allison, R.S. and Rogers, B.J. and Bradshaw, M.F.}, booktitle = {Journal of Vision}, date-modified = {2012-07-02 17:39:17 -0400}, doi = {10.1167/2.7.661}, journal = {Journal of Vision}, keywords = {Stereopsis}, number = {7}, pages = {661-661}, title = {Induced effects in motion parallax}, volume = {2}, year = {2002}, url-1 = {https://doi.org/10.1167/2.7.661}}
@inproceedings{allison2002576-591, abstract = {In this paper we describe the motivation, design and implementation of a system to visually guide a locomoting robot towards a target and around obstacles. The work was inspired by a recent suggestion that walking humans rely on perceived egocentric direction rather than optic flow to guide locomotion to a target. We briefly summarise the human experimental work and then illustrate how direction based heuristics can be used in the visual guidance of locomotion. We also identify perceptual variables that could be used in the detection of obstacles and a control law for the regulation of obstacle avoidance. We describe simulations that demonstrate the utility of the approach and the implementation of these control laws on a Nomad mobile robot. We conclude that our simple biologically inspired solution produces robust behaviour and proves a very promising approach.}, address = {Berlin}, author = {Rushton, S. K. and Wen, J. and Allison, R.S.}, booktitle = {Biologically Motivated Computer Vision, Proceedings}, date-modified = {2011-05-11 13:11:27 -0400}, editor = {Bulthoff, H. H. and Lee, S. W. and Poggio, T. A. and Wallraven, C.}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, pages = {576-591}, publisher = {Springer-Verlag Berlin}, series = {Lecture Notes in Computer Science}, title = {Egocentric direction and the visual guidance of robot locomotion background, theory and implementation}, url-1 = {http://portal.acm.org/citation.cfm?id=751732}, volume = {2525}, year = {2002}}
@inproceedings{Robinson:2002xa, address = {Orlando, FL USA}, author = {Robinson, M. and Laurence, J. and Zacher, J. and Hogue, A. and Allison, R.S. and Jenkin, M. and Harris, L. R. and Stuerzlinger, W.}, booktitle = {7th Annual Immersive Projection Technology (IPT) Symposium}, date-modified = {2011-05-11 13:23:58 -0400}, keywords = {Augmented & Virtual Reality}, pages = {electronic proceedings}, title = {IVY: The Immersive Virtual environment at York}, url-1 = {https://percept.eecs.yorku.ca/papers/Robinson-IVY-Immersive_Visual_Environment_at_York.pdf}, year = {2002}}
@inproceedings{allison2002149-155, abstract = {Virtual reality displays introduce spatial distortions that are very hard to correct because of the difficulty of precisely modelling the camera from the nodal point of each eye. How significant are these distortions for spatial perception in virtual reality? In this study we used a helmet mounted display and a mechanical head tracker to investigate the tolerance to errors between head motions and the resulting visual display. The relationship between the head movement and the associated updating of the visual display was adjusted by subjects until the image was judged as stable relative to the world. Both rotational and translational movements were tested and the relationship between the movements and the direction of gravity was varied systematically. Typically, for the display to be judged as stable, subjects needed the visual world to be moved in the opposite direction of the head movement by an amount greater than the head movement itself, during both rotational and translational head movements, although a large range of movement was tolerated and judged as appearing stable. These results suggest that it is not necessary to model the visual geometry accurately and suggest circumstances when tracker drift can be corrected by jumps in the display which will pass unnoticed by the user.}, address = {Los Alamitos}, author = {Jaekl, P. M. and Allison, R.S. and Harris, L. R. and Jasiobedzka, U. T. and Jenkin, H. L. and Jenkin, M. R. and Zacher, J. E. and Zikovitz, D. C.}, booktitle = {IEEE Virtual Reality 2002, Proceedings}, date-modified = {2011-05-11 13:23:59 -0400}, doi = {10.1109/VR.2002.996517}, editor = {Loftin, B. and Chen, J. X. and Rizzo, S. and Goebel, M. and Hirose, M.}, keywords = {Augmented & Virtual Reality}, pages = {149-155}, publisher = {IEEE Computer Soc}, series = {Proceedings of the IEEE Virtual Reality Annual International Symposium}, title = {Perceptual stability during head movement in virtual reality}, year = {2002}, url-1 = {https://doi.org/10.1109/VR.2002.996517}}
@article{allison20013133-43, abstract = {Stereoscopic corrugation detection in the presence of horizontal and vertical additive disparity noise was examined using a signal detection paradigm. Random-dot stereograms either represented a 3-D square-wave surface with various amounts of Gaussian-distributed additive disparity noise or had the same disparity values randomly redistributed. Stereoscopic detection of 2 arcmin peak amplitude corrugations was found to tolerate significantly greater amplitudes of vertical-disparity noise than horizontal-disparity noise irrespective of whether the corrugations were horizontally or vertically oriented. However, this directional difference in tolerance to disparity noise was found to reverse when the corrugation and noise amplitudes were increased (so as to produce equivalent signal-to-noise ratios). These results suggest that horizontal- and vertical-disparity noise pose different problems for dot-matching and post-matching surface reconstruction as corrugation and noise amplitudes increase.}, author = {Palmisano, S. and Allison, R.S. and Howard, I. P.}, date-modified = {2012-07-02 19:16:14 -0400}, doi = {10.1016/S0042-6989(01)00183-3}, journal = {Vision Research}, keywords = {Stereopsis}, number = {24}, pages = {3133-43}, title = {Effects of horizontal and vertical additive disparity noise on stereoscopic corrugation detection}, volume = {41}, year = {2001}, url-1 = {https://doi.org/10.1016/S0042-6989(01)00183-3}}
@incollection{Hogue:2001ce, address = {Nepean, Canada}, author = {Hogue, A. and Jenkin, M. and Allison, R.}, booktitle = {CITO Knowledge Network Conference: Beyond the Edge: Road Mapping Innovation}, date-added = {2011-05-09 14:22:17 -0400}, date-modified = {2011-09-12 22:18:19 -0400}, keywords = {Augmented & Virtual Reality}, month = {10}, title = {A Stereo-Vision based system for localization of the VR Trike}, year = {2001}}
@incollection{Zikovitz:2001vo, author = {Zikovitz, D.C. and Jenkin, M. and Harris, L.R. and Allison, R.S. and Jaekl, P. and Jasiobedzka, U. and Zacher, J.E.}, booktitle = {Proceedings of the IRIS-PRECARN 11th Annual Conference}, date-added = {2011-05-09 11:28:26 -0400}, date-modified = {2012-10-12 15:11:14 +0000}, keywords = {Augmented & Virtual Reality}, title = {Perceptual Stability in Virtual Environments II: Stability during head translation}, year = {2001}}
@incollection{Palmisano:2001fm, address = {Toronto, Canada}, author = {Palmisano, S.A. and Allison, R.S. and Howard, I.P.}, booktitle = {International Conference on Levels of Perception}, date-added = {2011-05-09 11:27:11 -0400}, date-modified = {2011-05-22 13:47:58 -0400}, keywords = {Stereopsis}, title = {Are there temporal limits for post-matching stereoscopic processing?}, year = {2001}}
@incollection{Jasiobedzka:2001nz, author = {Jasiobedzka, U. and Jenkin, M. and Harris, L.R. and Allison, R.S. and Jaekl, P. and Zacher, J.E. and Jenkin, H. and Zikovitz, D.C.}, booktitle = {Proceedings of the IRIS-PRECARN 11th Annual Conference}, date-added = {2011-05-09 11:26:10 -0400}, date-modified = {2011-11-06 16:56:58 -0500}, keywords = {Augmented & Virtual Reality}, title = {Perceptual Stability in Virtual Environments III: Psychophysics in Microgravity}, year = {2001}}
@incollection{Jaekl:2001jf, author = {Jaekl, P.M. and Allison, R.S. and Harris, L.R. and Jasiobedzka, U.T. and Jenkin, H.L. and Jenkin, M.R. and Zacher, J.E. and Zikovitz, D.C.}, booktitle = {Proceedings of the IRIS-PRECARN 11th Annual Conference}, date-added = {2011-05-09 11:24:45 -0400}, date-modified = {2011-05-18 16:09:32 -0400}, keywords = {Augmented & Virtual Reality}, title = {Perceptual Stability in Virtual Environments I: Stability during rotation}, year = {2001}}
@incollection{Hogue:2001ea, author = {Hogue, A. and Robinson, M. and Jenkin, M. and Allison, R.}, booktitle = {Proceedings of the IRIS-PRECARN 11th Annual Conference}, date-added = {2011-05-09 11:23:51 -0400}, date-modified = {2011-05-18 16:25:06 -0400}, keywords = {Augmented & Virtual Reality}, title = {Untethered, Wireless Pose Tracking for Virtual Reality}, year = {2001}}
@incollection{Palmisano:2001we, author = {Palmisano, S.A. and Allison, R.S. and Howard, I.P.}, booktitle = {Australian Journal of Psychology}, date-added = {2011-05-06 17:02:20 -0400}, date-modified = {2011-05-18 15:49:46 -0400}, keywords = {Stereopsis}, title = {Effects of Decorrelation on Stereoscopic Surface Perception with Static and Dynamic Random-dot Stereograms}, year = {2001}}
@inproceedings{Robinson:2001or, abstract = {When we move about within our environment, we are presented with a range of cues to our motion. Virtual reality systems attempt to simulate these various sensory cues. IVY -- the Immersive Visual environment at York -- is a virtual environment being constructed at York University to investigate aspects of human perception and to examine the relative importance of various visual and non-visual cues to the generation of an effective virtual environment. This paper describes the motivation behind the design of IVY, and describes the design of the essential hardware and software components.}, address = {Tokyo}, author = {Robinson, M. and Laurence, J. and Zacher, J. and Hogue, A. and Allison, R.S. and Jenkin, M. and Harris, L. R. and Stuerzlinger, W.}, booktitle = {ICAT'2001 - 11th International Conference on Artificial Reality and Telexistence}, date-added = {2011-05-06 14:15:19 -0400}, date-modified = {2011-05-18 15:58:19 -0400}, keywords = {Augmented & Virtual Reality}, month = {Dec 5th-7th}, title = {Growing IVY: Building the Immersive Virtual environment at York}, url-1 = {https://percept.eecs.yorku.ca/papers/Robinson-Growing_IVY.pdf}, year = {2001}}
@inproceedings{allison2001247-254, abstract = {To enhance presence, facilitate sensory motor performance, and avoid disorientation or nausea, virtual-reality applications require the percept of a stable environment. End-end tracking latency (display lag) degrades this illusion of stability and has been identified as a major fault of existing virtual-environment systems. Oscillopsia refers to the perception that the visual world appears to swim about or oscillate in space and is a manifestation of this loss of perceptual stability of the environment. In this paper the effects of end-end latency and head velocity on perceptual stability in a virtual environment were investigated psychophysically. Subjects became significantly more likely to report oscillopsia during head movements when end-end latency or head velocity were increased. It is concluded that perceptual instability of the world arises with increased head motion and increased display lag. Oscillopsia is expected to be more apparent in tasks requiring real locomotion or rapid head movement.}, address = {Yokohama, Japan}, author = {Allison, R.S. and Harris, L. R. and Jenkin, M. and Jasiobedzka, U. and Zacher, J. E.}, booktitle = {IEEE Virtual Reality 2001, Proceedings}, date-modified = {2011-05-11 13:24:02 -0400}, doi = {10.1109/VR.2001.913793}, editor = {Takemura, H. and Kiyokawa, K.}, keywords = {Augmented & Virtual Reality}, pages = {247-254}, publisher = {IEEE Computer Soc, Los Alamitos}, series = {Proceedings of the IEEE Virtual Reality Annual International Symposium}, title = {Tolerance of temporal delay in virtual environments}, year = {2001}, url-1 = {https://doi.org/10.1109/VR.2001.913793}}
@misc{Allison:2001zi, address = {Toronto, Canada}, author = {Allison, R.S.}, booktitle = {CRESTECH/GEOIDE Enhanced/Synthetic Vision Systems Workshop}, date-added = {2011-05-09 14:19:39 -0400}, date-modified = {2012-02-20 21:10:37 -0500}, keywords = {Vergence}, month = {11}, title = {Optical Alignment and Vergence Issues in Helmet-Mounted Displays}, year = {2001}}
@article{allison2000124-132, abstract = {We measured the gain and phase of horizontal and vertical vergences of five subjects as a function of stimulus area and position. Vergence eye movements were recorded by the scleral search coil method as subjects observed dichoptic displays oscillating in antiphase either from side to side or up and down with a peak-to-peak magnitude of 0.5 degrees at either 0.1 Hz or 1.0 Hz. The stimulus was a central textured disc with diameter ranging from 0.75 degrees to 65 degrees, or a peripheral annulus with outer diameter 65 degrees and inner diameter ranging from 5 degrees to 45 degrees. The remaining field was black. For horizontal vergence at both stimulus frequencies, gain and the phase lag were about the same for a 0.75 degrees stimulus as for a 65 degrees central stimulus. For vertical vergence, mean gain increased and mean phase lag decreased with increasing diameter of the central stimulus up to approximately 20 degrees. Thus, the stimulus integration area is much smaller for horizontal vergence than for vertical vergence. The integration area for vertical vergence is similar to that for cyclovergence, as revealed in a previous study. For both types of vergence, response gains were higher and phase lags smaller at 0.1 Hz than at 1.0 Hz. Also, gain decreased and phase lag increased with increasing occlusion of the central region of the stimulus. Vergence gain was significantly higher for a 45 degrees central disc than for a peripheral annulus with the same area. Thus, the central retina has more power to evoke horizontal or vertical vergence than the same area in the periphery. We compare the results with similar data for cyclovergence and discuss their ecological implications.}, author = {Howard, I. P. and Fang, X. P. and Allison, R.S. and Zacher, J. E.}, date-modified = {2012-07-02 19:19:49 -0400}, doi = {10.1007/s002210050014}, journal = {Experimental Brain Research}, keywords = {Vergence}, number = {2}, pages = {124-132}, title = {Effects of stimulus size and eccentricity on horizontal and vertical vergence}, volume = {130}, year = {2000}, url-1 = {https://doi.org/10.1007/s002210050014}}
@article{allison20002985-98, abstract = {We measured the ability to fuse dichoptic images of a horizontal line alone or in the presence of a textured background with different vertical disparity. Nonius-line measurements of vertical vergence were also obtained. Diplopia thresholds and vertical vergence gains were much higher in response to an isolated vertically disparate line than to one with a zero vertical-disparity background. The effect of the background was maximum when it was coplanar with the target and decreased with increasing relative horizontal disparity. We conclude that vertical disparities are integrated over a restricted range of horizontal disparities to drive vertical vergence.}, author = {Allison, R.S. and Howard, I. P. and Fang, X.}, date-modified = {2011-05-11 13:24:56 -0400}, doi = {10.1016/S0042-6989(00)00150-4}, journal = {Vision Research}, keywords = {Vergence}, number = {21}, pages = {2985-98}, title = {Depth selectivity of vertical fusional mechanisms}, volume = {40}, year = {2000}, url-1 = {https://doi.org/10.1016/S0042-6989(00)00150-4}}
@article{allison20001869-86, abstract = {Observers viewed large dichoptic patterns undergoing smooth temporal modulations or step changes in simulated slant or inclination under various conditions of disparity-perspective cue conflict and concordance. After presentation of each test surface, subjects adjusted a comparison surface to match the perceived slant or inclination of the test surface. Addition of conflicting perspective to disparity affected slant and inclination perception more for brief than for long presentations. Perspective had more influence for smooth temporal changes than for step changes in slant or inclination and for surfaces presented in isolation rather than with a zero disparity frame. These results indicate that conflicting perspective information plays a dominant role in determining the temporal properties of perceived slant and inclination.}, author = {Allison, R.S. and Howard, I. P.}, date-modified = {2011-05-11 13:19:35 -0400}, doi = {10.1016/S0042-6989(00)00034-1}, journal = {Vision Research}, keywords = {Stereopsis}, number = {14}, pages = {1869-86}, title = {Temporal dependencies in resolving monocular and binocular cue conflict in slant perception}, volume = {40}, year = {2000}, url-1 = {https://doi.org/10.1016/S0042-6989(00)00034-1}}
@article{allison20003823-7, abstract = {The authors measured the percept of changing depth from changing disparity in stereograms composed of random-dot textures that were either persistent or dynamically changed on every frame (a dynamic random-dot stereogram). Disparity was changed between frames to depict a surface undergoing smooth temporal changes in simulated slant. Matched depth was greater with dynamic random-dot stereograms than with persistent random-dot stereograms. These results confirm and extend earlier observations at depth threshold. The authors posit an explanation based on cue conflict between stereopsis and monocular depth cues.}, author = {Allison, R.S. and Howard, I. P.}, date-modified = {2012-07-02 19:18:19 -0400}, doi = {10.1016/S0042-6989(00)00223-6}, journal = {Vision Research}, keywords = {Stereopsis}, number = {28}, pages = {3823-7}, title = {Stereopsis with persisting and dynamic textures}, volume = {40}, year = {2000}, url-1 = {https://doi.org/10.1016/S0042-6989(00)00223-6}}
@incollection{Palmisano:2000km, author = {Palmisano, S.A. and Allison, R.S. and Howard, I.P.}, booktitle = {Australian Journal of Psychology}, date-added = {2011-05-06 17:03:14 -0400}, date-modified = {2011-05-18 16:31:46 -0400}, keywords = {Stereopsis}, title = {What is the problem with binocular correspondence?}, url-1 = {http://onlinelibrary.wiley.com/doi/10.1080/00049530008255108/pdf}, year = {2000}}
@incollection{allison20003766B864, author = {Allison, R.S. and Howard, I. P. and Fang, X.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-modified = {2011-09-12 21:55:52 -0400}, journal = {Investigative Ophthalmology and Visual Science}, keywords = {Vergence}, number = {4}, pages = {3766B864}, title = {Vertical fusional range increases with depth separation from competing stimuli}, volume = {41}, year = {2000}}
@inproceedings{Palmisano:2000wl, abstract = {Stereoscopic surface detection of human and ideal observers was assessed using a signal detection paradigm. Signal displays were disparity defined sinusoidal or square wave corrugations in depth containing various amounts of additive disparity noise. Distracter displays were created by scrambling pure signal stimuli along the vertical dimension - destroying surface representation while leaving the depth range intact. Additive disparity noise was found to interfere with stereoscopic surface detection for both human and ideal observers. Efficiencies found for stereoscopic surface detection were similar to those found previously for detection of a single step edge in depth (a supposedly easier task).}, author = {Palmisano, S. and Allison, R.S. and Howard, I.P.}, booktitle = {Proceedings of the ICSC Symposia on Intelligent Systems and Applications}, date-added = {2011-05-06 14:19:04 -0400}, date-modified = {2011-05-22 18:01:55 -0400}, keywords = {Stereopsis}, pages = {1006-1012}, title = {Effect of noise on stereoscopic surface perception in humans and ideal observers}, url-1 = {https://percept.eecs.yorku.ca/papers/Palmisano-Effect_of_Disparity_Noise.pdf}, volume = {1}, year = {2000}}
@inproceedings{allison2000169-75, abstract = {Although technologies such as head-mounted displays and CAVEs can be used to provide large immersive visual displays within small physical spaces, it is difficult to provide virtual environments which are as large physically as they are visually. A fundamental problem is that tracking technologies which work well in a small enclosed environment do not function well over longer distances. In this paper, we describe Trike - a `rideable' computer system which can be used to generate and explore large virtual spaces both visually and physically. This paper describes the hardware and software components of the system and a set of experiments which have been performed to investigate how the different perceptual cues that can be provided with Trike interact within an immersive environment.}, address = {New Brunswick, NJ, USA}, author = {Allison, R.S. and Harris, L. R. and Jenkin, M. and Pintilie, G. and Redlick, F. and Zikovitz, D. C.}, booktitle = {Proceedings IEEE Virtual Reality 2000}, date-modified = {2011-05-11 13:24:02 -0400}, doi = {10.1109/VR.2000.840495}, keywords = {Augmented & Virtual Reality}, pages = {169-75}, publisher = {IEEE Comput. Soc}, series = {Proceedings IEEE Virtual Reality 2000 (Cat. No.00CB37048)}, title = {First steps with a rideable computer}, year = {2000}, url-1 = {https://doi.org/10.1109/VR.2000.840495}}
@misc{Allison:2000fi, author = {Allison, R.S. and Fang, X. and Howard, I. P.}, date-added = {2011-05-09 16:38:33 -0400}, date-modified = {2011-05-18 16:29:14 -0400}, howpublished = {Poster presented at the annual conference for the Association for Research in Vision and Ophthalmology, Fort Lauderdale, Florida}, keywords = {Vergence}, month = {05}, title = {Vertical fusional range increases with depth separation from competing stimuli}, year = {2000}}
@misc{Allison:2000vh, author = {Allison, R.S. and Jenkin, M.}, date-added = {2011-05-09 11:29:23 -0400}, date-modified = {2012-05-25 21:08:01 +0000}, howpublished = {Proceedings of the IRIS-PRECARN 10th Annual Conference, 6-7}, keywords = {Stereopsis}, title = {Stereopsis with the TRISH 2 robot head}, year = {2000}}
@article{allison1999299-306, abstract = {The effect of field size, velocity, and visual fixation upon the perception of self-body rotation and tilt was examined in a rotating furnished room. Subjects sat in a stationary chair in the furnished room which could be rotated about the body roll axis. For full-field conditions, complete 360 degrees body rotation (tumbling) was the most common sensation (felt by 80\% of subjects). Constant tilt or partial tumbling (less than 360 degrees rotation) occurred more frequently with a small field of view (20 deg). The number of subjects who experienced complete tumbling increased with increases in field of view and room velocity (for velocities between 15 and 30 degrees s\textsuperscript{-1}). The speed of perceived self-rotation relative to room rotation also increased with increasing field of view.}, author = {Allison, R.S. and Howard, I. P. and Zacher, J. E.}, date-modified = {2012-07-02 19:22:01 -0400}, doi = {10.1068/p2891}, journal = {Perception}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {3}, pages = {299-306}, title = {Effect of field size, head motion, and rotational velocity on roll vection and illusory self-tilt in a tumbling room}, volume = {28}, year = {1999}, url-1 = {https://doi.org/10.1068/p2891}}
@incollection{Rogers:1999dg, abstract = {Optic-flow-field analyses have revealed that there are several sources of information to indicate the point of impact in a visual scene for a moving observer. Visual information alone, however, cannot indicate the heading direction, since heading is defined with respect to the observer. The importance of the distinction between the point of impact and the heading direction was brought out by Rushton et al [1998 Investigative Ophthalmology & Visual Science 39(4) S191] who showed that walking paths are determined primarily by the perceived direction of the target point. In contrast, many studies have shown that the point of impact can be detected with considerable precision by using a variety of different flow-field characteristics, so why doesn't optic flow play a more important role in controlling locomotion? Our results suggest that several factors are important. Locomotor paths are straighter when (i) there is local motion parallax between the intended target and objects at different distances; (ii) there is ground-plane texture and/or path markings; (iii) attention is directed towards the optic-flow cues. In addition, the extent to which we use flow-field information depends on the type of locomotion and the way in which heading direction is controlled by the observer.}, author = {Rogers, B.J. and Allison, R.S.}, booktitle = {Perception}, date-added = {2011-05-06 17:10:07 -0400}, date-modified = {2011-05-11 13:07:58 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {When do we use optic flow and when do we use perceived direction to control locomotion?}, url-1 = {http://www.perceptionweb.com/ecvp99/0589.html}, volume = {28}, year = {1999}}
@incollection{Palmisano:1999eh, author = {Palmisano, S.A. and Allison, R.S. and Howard, I.P.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-added = {2011-05-06 17:08:55 -0400}, date-modified = {2011-05-18 15:49:38 -0400}, keywords = {Stereopsis}, title = {Effects of decorrelation and disparity noise on stereoscopic surface perception}, volume = {40}, year = {1999}}
@incollection{Palmisano:1999br, abstract = {Random-dot stereograms depicting multiple transparent surfaces, lying at different depths, produce complex problems for the visual system. We investigated the perception of stereoscopic transparency with and without horizontal disparity noise. Stereoscopic displays depicted a surface with vertically oriented sinusoidal depth corrugations lying in front of, coplanar with, or behind a frontal plane surface. Gaussian-distributed disparity noise (standard deviations of 0, 2, 4, or 8 min of arc) was added to dots representing the sinusoid. In different conditions, subjects reported: (1) whether they saw the sinusoid or not (surface detection); (2) whether they saw both the plane and the sinusoid or not (surface segregation). While detection of the sinusoid was quite robust in the presence of substantial disparity noise (eg up to 2 - 4 min of arc), surface segregation degraded quickly. The depth order of the two transparent surfaces was important for surface segregation, which was achieved more readily when the plane was located in front of the sinusoid than when it was beyond or bisecting the sinusoid. The processes involved in segregating transparent surfaces would appear to be particularly susceptible to disparity noise--presumably owing to difficulties in distinguishing disparity discontinuities produced by transparency from those produced by noise.}, author = {Palmisano, S.A. and Allison, R.S. and Howard, I.P.}, booktitle = {Perception}, date-added = {2011-05-06 17:06:55 -0400}, date-modified = {2011-05-18 16:17:10 -0400}, keywords = {Stereopsis}, title = {Stereoscopic detection and segregation of noisy transparent surfaces}, url-1 = {http://www.perceptionweb.com/ecvp99/0156.html}, volume = {28}, year = {1999}}
@incollection{Allison:1999nq, author = {Allison, R.S. and Howard, I. P.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-added = {2011-05-06 17:05:54 -0400}, date-modified = {2011-05-22 13:48:40 -0400}, doi = {10.1016/S0042-6989(00)00223-6}, keywords = {Depth perception}, title = {Depth perception with persisting and dynamic textures}, year = {1999}, url-1 = {https://doi.org/10.1016/S0042-6989(00)00223-6}}
@incollection{Allison:1999ce, abstract = {TRISH-2 is a stereoscopic robot head that arose from the TRISH-1 platform. The robot consists of two computer-controlled CCD cameras acting as eyes. The cameras are mounted on motorised bases and have two extrinsic degrees of freedom. They can be independently panned (azimuth) under computer control. Torsion about the optic axis of each eye is achieved in software. The entire head can also be panned (azimuth) or tilted (elevation). Each camera provides additional optical degrees of freedom under computer control, with independent settings for focus, zoom, aperture, exposure, shutter speed, and video gain. Using TRISH-2, we investigated optimising the optical and rotational parameters for specific stereoscopic visual tasks. These techniques are often analogous to mechanisms proposed for biological vision. For example, Howard and Kaneko (1994 Vision Research 34 2505 - 2517) proposed a modified version of the deformation theory of inclination perception. Vertical shear disparity is averaged over the binocular field and used as the vertical disparity term in computing inclination from deformation disparity. To implement this theory, we use global cyclodisparity to set the torsional position of the eyes and then use deformation disparity to compute inclination. Torsional control of the stereo head improved efficiency of stereoscopic processing and enhanced performance for computing surface structure on inclined surfaces. Other analogies to biological stereoscopic mechanisms were considered as well as algorithms with no biological counterparts.}, author = {Allison, R.S. and Jenkin, M.}, booktitle = {Perception}, date-added = {2011-05-06 17:03:58 -0400}, date-modified = {2011-09-12 21:51:56 -0400}, keywords = {Stereopsis}, pages = {ECVP Abstract Supplement}, title = {Depth perception with a stereoscopic robot head}, url-1 = {http://www.perceptionweb.com/ecvp99/0264.html}, volume = {28}, year = {1999}}
@misc{Palmisano:1999oh, author = {Palmisano, S.A. and Allison, R.S. and Howard, I.P.}, date-added = {2011-05-09 17:06:28 -0400}, date-modified = {2011-05-18 15:49:27 -0400}, howpublished = {Poster presented at the annual conference of the Association for Research in Vision and Ophthalmology, (ARVO), Fort Lauderdale, Florida}, keywords = {Stereopsis}, month = {05}, title = {Effects of decorrelation and disparity noise on stereoscopic surface perception}, year = {1999}}
@misc{Fang:1999sy, author = {Fang, X. and Allison, R.S. and Howard, I.P.}, date-added = {2011-05-09 16:56:19 -0400}, date-modified = {2011-05-18 16:29:04 -0400}, howpublished = {Poster presented at the International Conference on Vision and Attention, York University, North York, Ontario}, keywords = {Vergence}, month = {06}, title = {Vertical fusional range increases with depth separation from competing stimuli}, year = {1999}}
@misc{Allison:1999rw, author = {Allison, R.S. and Howard, I. P.}, date-added = {2011-05-09 16:41:56 -0400}, date-modified = {2011-05-22 18:04:22 -0400}, howpublished = {Poster presented at the annual conference for the Association for Research in Vision and Ophthalmology}, keywords = {Depth perception}, month = {05}, title = {Depth perception with persisting and dynamic textures}, year = {1999}}
@misc{Allison:1999ez, author = {Allison, R.S. and Jenkin, M.}, date-added = {2011-05-09 16:40:44 -0400}, date-modified = {2011-05-22 13:50:48 -0400}, howpublished = {Poster presented at the European Conference on Visual Perception, Trieste, Italy}, keywords = {Stereopsis}, month = {08}, title = {Depth perception with a stereoscopic robot head}, year = {1999}}
@misc{Palmisano:1999ye, address = {North York, Ontario}, author = {Palmisano, S.A. and Howard, I.P. and Allison, R.S.}, date-added = {2011-05-09 11:30:17 -0400}, date-modified = {2012-05-25 21:10:32 +0000}, howpublished = {International Conference on Vision and Attention, North York, Ontario}, keywords = {Stereopsis}, title = {Stereo surface detection robust in presence of substantial disparity jitter}, year = {1999}}
@techreport{howardpwgsc, author = {Howard, I. P. and Palmisano, S.A. and Allison, R.S. and Fang, X.}, date-added = {2019-02-03 10:26:47 -0500}, date-modified = {2019-02-03 10:26:47 -0500}, institution = {PWGSC Report}, keywords = {Stereopsis}, number = {99:W7711-7-7393}, title = {Effects of noisy binocular disparity in stereoscopic virtual-reality systems}, year = {1999}}
@article{allison1998299-312, abstract = {The authors measured post-rotatory nystagmus and sensations of body rotation in standing subjects brought to rest in the dark after 3 minutes of each of the following conditions: (1) passive turning about the mid-body axis, involving only vestibular stimulation, (2) active turning about the mid-body axis, involving both vestibular stimulation and motor-proprioceptive activity in the legs, and (3) stepping round while remaining facing in the same direction on the center of a rotating platform with the head held in a stationary holder (apparent turning), involving only motor-proprioceptive activity. The same acceleration-velocity profile was used in all conditions. Post-rotatory nystagmus (slow phase) occurred in the same direction as passive body turning and was reduced in velocity after active body turning. After apparent turning, nystagmus was in the opposite direction to attempted body turning. The authors' theoretical analysis suggests that nystagmus after active turning should conform to the mean of the responses after passive and apparent turning rather than to their sum. The results conform more closely to the mean than to the sum, but with greater weight given to vestibular inputs than to motor-proprioceptive inputs. Post-rotatory sensations of self-rotation were in the expected opposite direction after passive turning and were lower in magnitude after active turning. After apparent turning, sensations of self-rotation were in the same direction as the attempted turning, an effect known as the antisomatogyral illusion.}, author = {Howard, I. P. and Zacher, J. E. and Allison, R.S.}, date-modified = {2012-07-02 19:25:58 -0400}, doi = {10.1016/S0957-4271(97)00079-7}, journal = {Journal of Vestibular Research: Equilibrium and Orientation}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {4}, pages = {299-312}, title = {Post-rotatory nystagmus and turning sensations after active and passive turning}, volume = {8}, year = {1998}, url-1 = {https://doi.org/10.1016/S0957-4271(97)00079-7}}
@article{allison19981287-304, abstract = {Linear transformations (shear or scale transformations) of either horizontal or vertical disparity give rise to the percept of slant or inclination. It has been proposed that the percept of slant induced by vertical size disparity, known as Ogle's induced-size effect, and the analogous induced-shear effect, compensate for scale and shear distortions arising from aniseikonia, eccentric viewing, and cyclodisparity. We hypothesised that these linear transformations of vertical disparity are processed more slowly than equivalent transformations of horizontal disparity (horizontal shear and size disparity). We studied the temporal properties of the stereoscopic slant and inclination percepts that arose when subjects viewed stereograms with various combinations of horizontal and vertical size or shear disparities. We found no evidence to support our hypothesis. There were no clear differences in the build-up of percepts of slant or inclination induced by step changes in horizontal size or shear disparity and those induced by step changes in vertical size or shear disparity. Perceived slant and inclination decreased in a similar manner with increasing temporal frequency for modulations of transformations of both horizontal and vertical disparity. Considerable individual differences were found and several subjects experienced slant reversal, particularly with oscillating stimuli. An interesting finding was that perceived slant induced by modulations of dilation disparity was in the direction of the vertical component. This suggests the vertical size disparity mechanism has a higher temporal bandwidth than the horizontal size disparity mechanism. However, conflicting perspective information may play a dominant role in determining the temporal properties of perceived slant and inclination.}, author = {Allison, R.S. and Howard, I. P. and Rogers, B. J. and Bridge, H.}, date-modified = {2012-07-02 19:22:46 -0400}, doi = {10.1068/p271287}, journal = {Perception}, keywords = {Stereopsis}, number = {11}, pages = {1287-304}, title = {Temporal aspects of slant and inclination perception}, volume = {27}, year = {1998}, url-1 = {https://doi.org/10.1068/p271287}}
@incollection{Howard:1998wq, author = {Howard, I.P. and Allison, R.S. and Howard, A.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-added = {2011-05-06 17:17:20 -0400}, date-modified = {2011-05-18 15:46:16 -0400}, keywords = {Depth perception}, number = {4}, read = {1}, title = {Depth from moving uncorrelated random-dot displays}, volume = {39}, year = {1998}}
@incollection{Fang:1998iq, author = {Fang, X. P. and Howard, I. P. and Allison, R.S. and Zacher, J. E.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-added = {2011-05-06 17:16:11 -0400}, date-modified = {2011-05-18 15:51:37 -0400}, keywords = {Vergence}, number = {4}, title = {Effects of stimulus size and eccentricity on horizontal vergence}, volume = {39}, year = {1998}}
@incollection{Allison:1998mm, author = {Allison, R.S. and Howard, I. P.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-added = {2011-05-06 17:15:03 -0400}, date-modified = {2011-05-22 13:54:43 -0400}, keywords = {Stereopsis}, number = {4}, title = {Disparity-perspective interactions in slant perception}, volume = {39}, year = {1998}}
@incollection{Allison:1998yu, abstract = {Opposed motion of the stereoscopic half-images of an object evokes a compelling percept of motion in depth. This percept could arise from positional disparity or from interocular differences in motion signals. Correlated dynamic random-dot stereograms have been used to dissociate position and motion disparity. We have taken a different approach using uncorrelated random-dot displays. The stimulus consisted of two random-dot displays, one just above a central fixation point and a second just below the fixation point. One of these served as the test image and the other as the comparison image. The test image was typically binocularly uncorrelated; the comparison image was correlated. The half-images of both displays oscillated horizontally in counterphase. The boundaries of each image were stationary, so that there were no moving deletion-accretion boundaries. Subjects adjusted the oscillation of the comparison display until its perceived velocity matched that of the test display. The effects of variation of dot density, dot lifetime, stimulus velocity, and oscillation frequency were studied. All subjects perceived strong apparent motion in depth in the uncorrelated display. Motion in depth was often accompanied by the appearance of sideways motion. No consistent impression of depth was obtained if the motion was stopped. Thus, dynamic depth can be created by changing disparity in a display with zero mean instantaneous disparity. We propose that the impression of motion in depth arises because of the consistent sign of changing disparity between randomly paired dots.}, author = {Allison, R.S. and Howard, I. P. and Howard, A.}, booktitle = {Perception}, date-added = {2011-05-06 17:11:50 -0400}, date-modified = {2014-09-26 00:17:41 +0000}, keywords = {Motion in depth}, title = {Motion in depth can be elicited by dichoptically uncorrelated textures}, url-1 = {http://percept.eecs.yorku.ca/papers/ecvp%201998.pdf}, volume = {27}, year = {1998}}
@misc{zacher1998, author = {Zacher, J. E. and Irving, B.I. and Allison, R.S. and Callander, M.E.}, date-added = {2011-05-09 17:08:07 -0400}, date-modified = {2011-05-18 15:50:44 -0400}, howpublished = {Poster presented at the annual meeting of the Association for Research in Vision and Ophthalmology, (ARVO), Fort Lauderdale, Florida}, keywords = {Eye Movements & Tracking}, month = {05}, title = {Effects of scleral search coil wear on visual function and acuity}, year = {1998}}
@misc{Fang:1998ft, author = {Fang, X. and Howard, I. P. and Allison, R.S. and Zacher, J.E.}, date-added = {2011-05-09 16:57:11 -0400}, date-modified = {2011-05-18 15:51:18 -0400}, howpublished = {Poster presented at the annual conference for the Association for Research in Vision and Ophthalmology, Fort Lauderdale, Florida}, keywords = {Vergence}, month = {04}, title = {Effects of stimulus size and eccentricity on horizontal vergence}, year = {1998}}
@misc{Allison:1998lt, author = {Allison, R.S. and Howard, I. P.}, date-added = {2011-05-09 16:44:51 -0400}, date-modified = {2011-05-22 18:04:58 -0400}, howpublished = {Poster presented at the annual conference for the Association for Research in Vision and Ophthalmology, Fort Lauderdale, Florida}, keywords = {Stereopsis}, month = {05}, title = {Disparity-Perspective Interactions in Slant Perception}, year = {1998}}
@misc{Allison:1998kc, author = {Allison, R.S. and Howard, I. P. and Howard, A.}, date-added = {2011-05-09 16:43:38 -0400}, date-modified = {2014-09-26 00:16:46 +0000}, howpublished = {Paper presented at the European Conference on Visual Perception, Oxford, UK}, keywords = {Motion in depth}, month = {08}, title = {Motion in depth can be elicited by dichoptically uncorrelated textures}, url-1 = {http://percept.eecs.yorku.ca/papers/ecvp%201998.pdf}, year = {1998}}
@article{allison1997153-9, abstract = {We measured the gain and phase of vertical vergence in response to disjunctive vertical oscillations of dichoptic textured displays. The texture elements were m-scaled to equate visibility over the area of the display and were aperiodic and varied in shape so as to avoid spurious binocular matches. The display subtended 65 degrees and oscillated through peak-to-peak amplitudes from 18 arc min to 4 degrees at frequencies from 0.05 to 2 Hz - larger ranges than used in previous investigations. The gain of vergence was near 1 when the stimulus oscillated at 18 arc min at a frequency of 0.1 Hz or less. As the amplitude of stimulus oscillation increased from 18 arc min to 4 degrees, vergence gain decreased at all frequencies, which is evidence of a nonlinearity. Gain declined with increasing stimulus frequency but was still about 0.5 at 2 Hz for an amplitude of 18 arc min. Phase lag increased from less than 10 degrees at a stimulus frequency of 0.05 Hz to between 100 degrees and 145 degrees at 2 Hz. Overall, the dynamics of vertical vergence resemble the dynamics of horizontal vergence and cyclovergence.}, author = {Howard, I. P. and Allison, R.S. and Zacher, J. E.}, date-modified = {2012-07-02 19:27:24 -0400}, doi = {10.1007/PL00005735}, journal = {Experimental Brain Research}, keywords = {Vergence}, number = {1}, pages = {153-9}, title = {The dynamics of vertical vergence}, volume = {116}, year = {1997}, url-1 = {https://doi.org/10.1007/PL00005735}}
@article{allison1997369-80, abstract = {The response of the vestibulo-ocular reflex following unilateral vestibular deafferentation by gentamicin ablation was studied using transient stimuli. The response to these rapid passive head turns showed a strong asymmetry with permanent, reduced gains toward the side of lesion. These gain reductions have large variation (gains of 0.26 to 0.83), which may result from preferential sparing of regularly firing afferent fibers following gentamicin ablation. Based on the size and nature of the nonlinearity, an explanation based on Ewald's second law was discounted.}, author = {Allison, R.S. and Eizenman, M. and Tomlinson, R. D. and Nedzelski, J. and Sharpe, J. A.}, date-modified = {2019-02-03 09:17:35 -0500}, doi = {10.1016/S0957-4271(96)00162-0}, journal = {Journal of Vestibular Research: Equilibrium and Orientation}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {5}, pages = {369-80}, title = {Vestibulo-ocular reflex deficits to rapid head turns following intratympanic gentamicin instillation}, volume = {7}, year = {1997}, url-1 = {https://doi.org/10.1016/S0957-4271(96)00162-0}}
@incollection{Howard:1997bl, address = {North York, Ontario}, author = {Howard, I.P. and Allison, R.S. and Groen, E. and Jenkin, H. and Zacher, J.E.}, booktitle = {International Conference on Vision and Action}, date-added = {2011-05-09 11:31:30 -0400}, date-modified = {2012-07-02 22:45:39 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {Visually induced self inversion and levitation}, year = {1997}}
@incollection{Fang:1997sr, author = {Fang, X. P. and Howard, I. P. and Allison, R.S. and Zacher, J. E.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-added = {2011-05-09 10:52:02 -0400}, date-modified = {2011-05-22 13:51:30 -0400}, keywords = {Vergence}, number = {4}, pages = {S986}, title = {Effects of field size on vertical vergence}, volume = {38}, year = {1997}}
@incollection{allison19974574-4574, author = {Fang, X. and Howard, I. P. and Allison, R.S. and Zacher, J. E.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-modified = {2011-09-12 21:55:15 -0400}, keywords = {Vergence}, number = {4}, pages = {4574}, title = {Effects of stimulus size and eccentricity on vertical vergence}, volume = {38}, year = {1997}}
@incollection{allison19974252-4252, author = {Allison, R.S. and Howard, I. P. and Rogers, B. J. and Bridge, H.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-modified = {2011-09-12 21:49:57 -0400}, keywords = {Stereopsis}, number = {4}, pages = {4252}, title = {Efficiency of slant and inclination perception as a function of temporal frequency}, volume = {38}, year = {1997}}
@incollection{allison19971, author = {Allison, R.S. and Howard, I. P. and Rogers, B. J. and Bridge, H.}, booktitle = {Perception}, date-modified = {2011-09-12 22:05:43 -0400}, keywords = {Stereopsis}, number = {10}, pages = {1}, title = {Temporal characteristics of stereoscopic slant perception}, url-1 = {https://percept.eecs.yorku.ca/papers/ava 1997 conference.pdf}, volume = {26}, year = {1997}}
@misc{Howard:1997fv, author = {Howard, I.P. and Allison, R.S. and Groen, E. and Jenkin, H. and Zacher, J.E.}, date-added = {2011-05-09 17:01:03 -0400}, date-modified = {2011-05-18 16:24:38 -0400}, howpublished = {Paper presented at International Conference on Vision and Action, York University, North York, Ontario}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {06}, title = {Tumbling and Levitation}, year = {1997}}
@misc{Fang:1997tf, author = {Fang, X. and Howard, I. P. and Allison, R.S. and Zacher, J.E.}, date-added = {2011-05-09 16:59:18 -0400}, date-modified = {2011-05-18 15:52:10 -0400}, howpublished = {Poster presented at the annual conference for the Association for Research in Vision and Ophthalmology, Fort Lauderdale, Florida}, keywords = {Vergence}, month = {04}, title = {Effects of stimulus size and eccentricity on vertical vergence}, year = {1997}}
@misc{Fang:1997tn, author = {Fang, X. and Howard, I.P. and Allison, R.S. and Zacher, J.E.}, date-added = {2011-05-09 16:58:06 -0400}, date-modified = {2011-05-18 15:52:22 -0400}, howpublished = {Poster presented at International Conference on Vision and Action, York University, North York, Ontario}, keywords = {Vergence}, month = {06}, title = {Effects of stimulus size and eccentricity on vertical vergence}, year = {1997}}
@misc{Allison:1997hy, author = {Allison, R.S. and Howard, I. P. and Rogers, B.J. and Bridge, H.}, date-added = {2011-05-09 16:48:04 -0400}, date-modified = {2011-05-18 15:53:12 -0400}, howpublished = {Poster presented at the annual conference for the Association for Research in Vision and Ophthalmology, Fort Lauderdale, Florida}, keywords = {Stereopsis}, month = {04}, title = {Efficiency of Slant and Inclination Perception as a Function of Temporal Frequency}, year = {1997}}
@misc{Allison:1997cg, author = {Allison, R.S.}, date-added = {2011-05-09 16:46:52 -0400}, date-modified = {2011-05-11 13:24:03 -0400}, howpublished = {Poster presented at the Institute for Space and Terrestrial Science AGM, Toronto, Ontario}, keywords = {Augmented & Virtual Reality}, month = {06}, title = {Vertical Disparity, Cue Conflict and Virtual Reality Displays}, year = {1997}}
@misc{Allison:1997wj, author = {Allison, R.S. and Howard, I. P. and Rogers, B.J. and Bridge, H}, date-added = {2011-05-09 16:45:59 -0400}, date-modified = {2011-05-18 16:17:37 -0400}, howpublished = {Poster presented at the AVA Conference on Depth Perception, Surrey, UK}, keywords = {Stereopsis}, month = {09}, title = {Temporal Aspects of Stereoscopic Slant Perception}, url-1 = {https://percept.eecs.yorku.ca/papers/ava 1997 conference.pdf}, year = {1997}}
@techreport{allison1997, author = {Howard, I. P. and Kaneko, H. and Pierce, B. and Allison, R.S. and Zacher, J. E.}, date-added = {2011-05-09 16:09:20 -0400}, date-modified = {2011-05-18 16:04:36 -0400}, institution = {PWGSC Report}, keywords = {Stereopsis}, number = {97:W7711-4-7217}, title = {Judgements of the inclination and slant of surfaces in stereoscopic display systems}, year = {1997}}
@article{allison19961073-1082, abstract = {We present a combined head-eye tracking system suitable for use with free head movement during natural activities. This system provides an integrated head and eye position measurement while allowing for a large range of head movement (approx 1.8 m of head translation is tolerated). Six degrees of freedom of head motion and two degrees of freedom of eye motion are measured by the system. The system was designed to be useful for the evaluation of the vestibulo-ocular reflex (VOR). The VOR generates compensatory eye movements in order to stabilize gaze during linear or rotational motion of the head. Current clinical and basic research evaluation of the VOR has used a restricted range of head motion, mainly low-frequency, yaw rotation. An integrated eye-head tracking system such as the one presented here allows the VOR response to linear and angular head motion to be studied in a more physiologically relevant manner. Two examples of the utility of the integrated head and eye tracking system in evaluating the vestibular response to linear and angular motion are presented.}, author = {Allison, R.S. and Eizenman, M. and Cheung, B. S. K.}, date-modified = {2012-07-02 19:28:16 -0400}, doi = {10.1109/10.541249}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {Eye Movements & Tracking}, number = {11}, pages = {1073-1082}, title = {Combined head and eye tracking system for dynamic testing of the vestibular system}, volume = {43}, year = {1996}, url-1 = {https://doi.org/10.1109/10.541249}}
@incollection{Tomlinson:1996mb, abstract = {The response of the vestibulo-ocular reflex following unilateral vestibular deafferentation by gentamicin ablation was studied using transient stimuli. The response to these rapid passive head turns showed a strong asymmetry with permanent, reduced gains toward the side of lesion. These gain reductions have large variation (gains of 0.26 to 0.83), which may result from preferential sparing of regularly firing afferent fibers following gentamicin ablation. Based on the size and nature of the nonlinearity, an explanation based on Ewald's second law was discounted.}, author = {Tomlinson, R.D. and Allison, R.S. and Eizenman, M. and Nedzelski, J. and Sharpe, J.A.}, booktitle = {Journal of Vestibular Research Supplement}, date-added = {2011-05-09 10:54:58 -0400}, date-modified = {2011-05-18 16:31:25 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {4S}, pages = {S37}, title = {VOR deficits during rapid head turns following intratympanic gentamicin instillation}, volume = {6}, year = {1996}}
@incollection{allison1996S527, author = {Zacher, J. E. and Allison, R.S. and Howard, I. P.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-modified = {2011-09-12 21:59:42 -0400}, keywords = {Eye Movements & Tracking}, number = {3}, pages = {S527}, title = {Effects of visual illusions on eye movements}, volume = {37}, year = {1996}}
@incollection{allison1996S165, author = {Allison, R.S. and Howard, I. P. and Zacher, J. E. and Bridge, H.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-modified = {2011-09-12 22:01:01 -0400}, keywords = {Vergence}, number = {3}, pages = {S165}, title = {Dynamic response of the vertical vergence system}, volume = {37}, year = {1996}}
@misc{Zacher:1996jv, author = {Zacher, J. E. and Allison, R.S. and Howard, I. P.}, date-added = {2011-05-09 17:09:05 -0400}, date-modified = {2011-05-18 15:52:34 -0400}, howpublished = {Poster presented at the annual meeting of the Association for Research in Vision and Ophthalmology, (ARVO), Fort Lauderdale, Florida}, keywords = {Eye Movements & Tracking}, month = {04}, title = {Effects of Visual Illusions on Eye Movements}, year = {1996}}
@misc{Tomlinson:1996wl, author = {Tomlinson, R.D. and Allison, R.S. and Eizenman, M. and Nedzelski, J. and Sharpe, J.A.}, date-added = {2011-05-09 17:07:12 -0400}, date-modified = {2011-05-18 16:31:10 -0400}, howpublished = {Poster presented at the XIXth Meeting of the Barany Society, Sydney, Australia}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {08}, title = {VOR Deficits During Rapid Head Turns Following Intratympanic Gentamicin Instillation}, year = {1996}}
@misc{Groen:1996vo, author = {Groen, E. and Jenkin, H. and Allison, R.S. and Zacher, J.E. and Howard, I.P.}, date-added = {2011-05-09 17:04:44 -0400}, date-modified = {2011-05-22 18:06:04 -0400}, howpublished = {Poster presented at the Institute for Space and Terrestrial Science Annual General Meeting, Toronto, Ontario}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {09}, title = {The role of visual cues in spatial orientation in weightlessness: Neurolab E136}, year = {1996}}
@misc{Jenkin:1996tp, author = {Jenkin, H. and Zacher, J.E. and Allison, R.S. and Groen, E. and Howard, I.P.}, date-added = {2011-05-09 17:03:41 -0400}, date-modified = {2011-05-18 16:14:28 -0400}, howpublished = {Poster presented at the Institute for Space and Terrestrial Science AGM, Toronto, Ontario}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {10}, title = {Role of visual cues in microgravity spatial orientation}, year = {1996}}
@misc{Fang:1996pt, author = {Fang, X. and Allison, R.S. and Zacher, J.E. and Howard, I.P.}, date-added = {2011-05-09 17:00:17 -0400}, date-modified = {2011-05-18 15:51:55 -0400}, howpublished = {Poster presented at the Institute for Space and Terrestrial Science Annual General Meeting, Toronto, Ontario}, keywords = {Vergence}, month = {09}, title = {Effects of stimulus size and eccentricity on vertical vergence}, year = {1996}}
@misc{Allison:1996ky, author = {Allison, R.S. and Zacher, J. E. and Howard, I. P.}, date-added = {2011-05-09 16:50:11 -0400}, date-modified = {2011-05-18 15:48:31 -0400}, howpublished = {Poster presented at the annual conference for the Association for Research in Vision and Ophthalmology, Fort Lauderdale, Florida}, keywords = {Vergence}, month = {04}, title = {Dynamic Response of the Vertical Vergence System}, year = {1996}}
@incollection{Allison:1995sa, author = {Allison, R.S. and Zacher, J. E. and Howard, I. P.}, booktitle = {ICVC Abstracts. International Conference on Visual Coding}, date-added = {2011-05-09 11:34:19 -0400}, date-modified = {2012-07-02 22:43:52 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {2}, pages = {1}, title = {Illusory self-tilt and roll-vection in a tumbling room}, volume = {95}, year = {1995}}
@incollection{Allison:1995xv, address = {Toronto, Canada}, author = {Allison, R.S. and Howard, I.P.}, booktitle = {Institute for Space and Terrestrial Science Showcase 1995: Networking for Profit}, date-added = {2011-05-09 11:32:59 -0400}, date-modified = {2012-07-02 22:45:34 -0400}, keywords = {Stereopsis}, title = {Stereoscopic vision in flight simulators}, year = {1995}}
@incollection{Zacher:1995sz, author = {Zacher, J. E. and Allison, R.S. and Howard, I. P.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-added = {2011-05-09 10:49:24 -0400}, date-modified = {2011-05-18 16:18:40 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {4}, pages = {S685}, title = {The effect of active movement on postrotatory nystagmus and illusory body rotation}, volume = {36}, year = {1995}}
@incollection{allison1995S829-S829, author = {Allison, R.S. and Zacher, J. E. and Howard, I. P. and Oman, C. M.}, booktitle = {Investigative Ophthalmology and Visual Science}, date-modified = {2011-09-12 21:55:01 -0400}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {4}, pages = {S829}, title = {The Effect of Field Size on Roll Vection in a Tumbling Room}, volume = {36}, year = {1995}}
@misc{Zacher:1995zi, author = {Zacher, J. E. and Allison, R.S. and Howard, I. P.}, date-added = {2011-05-09 17:12:03 -0400}, date-modified = {2011-05-18 16:20:55 -0400}, howpublished = {Poster presented at the International Conference on Visual Coding, (ICVC), North York, Ontario}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {06}, title = {The integration of postrotatory nystagmus and illusory body rotation}, year = {1995}}
@misc{Zacher:1995tz, author = {Zacher, J. E. and Allison, R.S. and Howard, I. P.}, date-added = {2011-05-09 17:09:54 -0400}, date-modified = {2011-05-18 16:18:30 -0400}, howpublished = {Poster presented at the annual meeting of the Association for Research in Vision and Ophthalmology, (ARVO), Fort Lauderdale, Florida}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {05}, title = {The effect of active movement on postrotatory nystagmus and illusory body rotation}, year = {1995}}
@misc{Allison:1995pc, author = {Allison, R.S. and Zacher, J. E. and Howard, I. P.}, date-added = {2011-05-09 16:52:10 -0400}, date-modified = {2011-05-22 18:06:40 -0400}, howpublished = {Paper presented at the annual conference for the Association for Research in Vision and Ophthalmology, Fort Lauderdale, Florida}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, month = {05}, title = {The effect of field size on roll vection in a tumbling room}, year = {1995}}
@techreport{Allison:1995fl, author = {Allison, R.S. and Zacher, J. and Howard, I. P.}, date-added = {2011-05-09 16:35:15 -0400}, date-modified = {2011-05-18 16:12:38 -0400}, institution = {NASA}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, number = {20040141722}, title = {Report on the effect of field size, head motion, and rotational velocity on roll vection and illusory self-tilt in a tumbling room}, year = {1995}}
@incollection{Allison:1994qe, abstract = {Rotational testing of the vestibulo-ocular reflex (VOR) does not always correlate with patients' symptoms. One possible reason is that conventional testing is performed at low frequencies and relatively low velocities that do not correspond to the high frequency perturbations encountered during locomotion. We present a combined head-eye tracking system suitable for use with free head movement during natural activities. The system was used to study the response to rapid passive head turns in normal subjects and patients with unilateral lesions. The patients have marked, persistent VOR deficits for rotation toward the side of lesion. The implications of these results on the organization of the normal VOR and the process of VOR compensation are discussed.}, address = {Toronto, Canada}, author = {Allison, R.S. and Eizenman, M. and Tomlinson, R.D. and Sharpe, J.A.}, booktitle = {Advances in Biomedical Engineering and Biosystems Science}, date-added = {2011-05-09 11:36:10 -0400}, date-modified = {2014-02-03 14:37:56 +0000}, keywords = {Eye Movements & Tracking}, month = {06}, title = {High frequency, high acceleration testing of the VOR}, year = {1994}}
@inproceedings{allison1994267-268, abstract = {Rotational testing of the vestibulo-ocular reflex (VOR) does not always correlate with patients' symptoms. One possible reason is that conventional testing is performed at low frequencies and relatively low velocities that do not correspond to the high frequency perturbations encountered during locomotion. The authors present a combined head-eye tracking system suitable for use with free head movement during natural activities. The system was used to study the response to rapid passive head turns in normal subjects and patients with unilateral lesions. The patients have marked, persistent VOR deficits for rotation toward the side of lesion. The implications of these results on the organization of the normal VOR and the process of VOR compensation are discussed.}, address = {New York}, author = {Allison, R.S. and Eizenman, M. and Tomlinson, R. D. and Sharpe, J. and Frecker, R. C. and Anderson, J. and McIlmoy, L.}, booktitle = {Proceedings of the 16th Annual International Conference of the IEEE Engineering in Medicine and Biology Society - Engineering Advances: New Opportunities for Biomedical Engineers, Pts 1 and 2}, date-modified = {2014-02-03 14:36:42 +0000}, doi = {10.1109/IEMBS.1994.412052}, editor = {Sheppard, N. F. and Eden, M. and Kantor, G.}, keywords = {Eye Movements & Tracking}, pages = {267-268}, publisher = {IEEE}, title = {Head and Eye Tracking for Study of the VOR During Natural Head Motion}, year = {1994}, url-1 = {https://doi.org/10.1109/IEMBS.1994.412052}}
@article{Harris:2024aa, author = {Laurence Harris and Bj{\"o}rn J{\"o}rges and Nils Bury and Meaghan McManus and Ambika Bansal and Robert Allison and Michael Jenkin}, date-added = {2024-03-04 22:15:18 -0500}, date-modified = {2024-03-04 22:16:56 -0500}, journal = {npj Microgravity}, keywords = {Optic flow & Self Motion (also Locomotion & Aviation)}, title = {The Effects of Long-Term Exposure to Microgravity and Body Orientation Relative to Gravity on Perceived Traveled Distance}, year = {in press}}