OVR_SensorFusion.cpp
1 /************************************************************************************
2 
3 Filename : OVR_SensorFusion.cpp
4 Content : Methods that determine head orientation from sensor data over time
5 Created : October 9, 2012
6 Authors : Michael Antonov, Steve LaValle, Dov Katz, Max Katsev, Dan Gierl
7 
8 Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
9 
10 Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
11 you may not use the Oculus VR Rift SDK except in compliance with the License,
12 which is provided at the time of installation or download, or which
13 otherwise accompanies this software in either electronic or hard copy form.
14 
15 You may obtain a copy of the License at
16 
17 http://www.oculusvr.com/licenses/LICENSE-3.1
18 
19 Unless required by applicable law or agreed to in writing, the Oculus VR SDK
20 distributed under the License is distributed on an "AS IS" BASIS,
21 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22 See the License for the specific language governing permissions and
23 limitations under the License.
24 
25 *************************************************************************************/
26 
27 #include "OVR_SensorFusion.h"
28 #include "Kernel/OVR_Log.h"
29 #include "Kernel/OVR_System.h"
30 #include "OVR_JSON.h"
31 #include "OVR_Profile.h"
32 #include "OVR_Stereo.h"
33 #include "OVR_Recording.h"
34 
35 // Temporary for debugging
36 bool Global_Flag_1 = true;
37 
38 //Convenient global variables to temporarily extract this data.
39 float TPH_CameraPoseOrientationWxyz[4];
40 double TPH_CameraPoseConfidence;
41 double TPH_CameraPoseConfidenceThresholdOverrideIfNonZero;
42 bool TPH_IsPositionTracked = false;
43 
44 
45 namespace OVR {
46 
47 const Transformd DefaultWorldFromCamera(Quatd(), Vector3d(0, 0, -1));
48 
49 //-------------------------------------------------------------------------------------
50 // ***** Sensor Fusion
51 
52 SensorFusion::SensorFusion(SensorDevice* sensor)
53  : ExposureRecordHistory(100), LastMessageExposureFrame(NULL),
54  FocusDirection(Vector3d(0, 0, 0)), FocusFOV(0.0),
55  FAccelInImuFrame(1000), FAccelInCameraFrame(1000), FAngV(20),
56  EnableGravity(true), EnableYawCorrection(true), MagCalibrated(false),
57  EnableCameraTiltCorrection(true),
58  MotionTrackingEnabled(true), VisionPositionEnabled(true),
59  CenterPupilDepth(0.0)
60 {
61  pHandler = new BodyFrameHandler(this);
62 
63  // And the clock is running...
64  LogText("*** SensorFusion Startup: TimeSeconds = %f\n", Timer::GetSeconds());
65 
66  if (sensor)
67  AttachToSensor(sensor);
68 
69  Reset();
70 }
71 
72 SensorFusion::~SensorFusion()
73 {
74  delete(pHandler);
75 }
76 
77 bool SensorFusion::AttachToSensor(SensorDevice* sensor)
78 {
79  pHandler->RemoveHandlerFromDevices();
80  Reset();
81 
82  if (sensor != NULL)
83  {
84  // cache mag calibration state
85  MagCalibrated = sensor->IsMagCalibrated();
86 
87  // Load IMU position
88  Array<PositionCalibrationReport> reports;
89  bool result = sensor->GetAllPositionCalibrationReports(&reports);
90  if (result)
91  {
92  PositionCalibrationReport imu = reports[reports.GetSize() - 1];
94  // convert from vision to the world frame
95  // TBD convert rotation as necessary?
96  imu.Position.x *= -1.0;
97  imu.Position.z *= -1.0;
98 
100 
103  }
104 
105  // Repopulate CPFOrigin
106  SetCenterPupilDepth(CenterPupilDepth);
107 
108  // Subscribe to sensor updates
109  sensor->AddMessageHandler(pHandler);
110 
111  // Initialize the sensor state
112  // TBD: This is a hack to avoid a race condition if sensor status is checked immediately
113  // after sensor creation but before any data has flowed through. We should probably
114  // not depend strictly on data flow to determine capabilities like orientation and position
115  // tracking, or else use some sort of synchronous method to wait for data
116  LocklessState init;
117  init.StatusFlags = Status_OrientationTracked;
118  UpdatedState.SetState(init);
119  }
120 
121  return true;
122 }
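Once a sensor is attached, the fusion object runs entirely off the message handler; all the application does afterwards is poll poses. A minimal usage sketch (an illustrative assumption, not part of this file, relying only on types the file already includes; the 20 ms prediction offset is a placeholder):

    void exampleAttach(OVR::SensorDevice* pSensor)
    {
        using namespace OVR;

        SensorFusion fusion;                 // Reset() leaves the CPF at the origin
        if (pSensor)
            fusion.AttachToSensor(pSensor);  // caches mag calibration, subscribes to sensor messages

        // Per frame: ask for a pose slightly in the future to hide sensor-to-display latency.
        Transformf pose = fusion.GetPoseAtTime(Timer::GetSeconds() + 0.02);
        (void)pose;
    }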
123 
124 // Resets the current orientation
125 void SensorFusion::Reset()
126 {
127  Lock::Locker lockScope(pHandler->GetHandlerLock());
128 
129  UpdatedState.SetState(LocklessState());
130  WorldFromImu = PoseStated();
131  WorldFromImu.Pose = ImuFromCpf.Inverted(); // place CPF at the origin, not the IMU
132  CameraFromImu = PoseStated();
133  VisionError = PoseStated();
134  WorldFromCamera = DefaultWorldFromCamera;
135  WorldFromCameraConfidence = -1;
136 
137  ExposureRecordHistory.Clear();
138  NextExposureRecord = ExposureRecord();
139  LastMessageExposureFrame = MessageExposureFrame(NULL);
140  LastVisionAbsoluteTime = 0;
141  Stage = 0;
142 
143  MagRefs.Clear();
144  MagRefIdx = -1;
145  MagCorrectionIntegralTerm = Quatd();
146  AccelOffset = Vector3d();
147 
148  FAccelInCameraFrame.Clear();
149  FAccelInImuFrame.Clear();
150  FAngV.Clear();
151 
152  setNeckPivotFromPose(WorldFromImu.Pose);
153 }
154 
155 //-------------------------------------------------------------------------------------
156 // Vision & message processing
157 
158 void SensorFusion::OnVisionFailure()
159 {
160  // do nothing
162 }
163 
164 void SensorFusion::OnVisionPreviousFrame(const Transform<double>& cameraFromImu)
165 {
166  // simply save the observation for use in the next OnVisionSuccess call;
167  // this should not have unintended side-effects for position filtering,
168  // since the vision time is not updated and the system keeps thinking we don't have vision yet
169  CameraFromImu.Pose = cameraFromImu;
170 }
171 
172 void SensorFusion::OnVisionSuccess(const Transform<double>& cameraFromImu, UInt32 exposureCounter)
173 {
174  Lock::Locker lockScope(pHandler->GetHandlerLock());
175 
176  Recording::GetRecorder().RecordVisionSuccess(exposureCounter);
177 
178  LastVisionAbsoluteTime = GetTime();
179 
180  // ********* LastVisionExposureRecord *********
181 
182  // Skip old data and use the record that matches the exposure counter
183  while (!ExposureRecordHistory.IsEmpty() &&
184  (ExposureRecordHistory.PeekFront().ExposureCounter <= exposureCounter))
185  {
186  LastVisionExposureRecord = ExposureRecordHistory.PopFront();
187  }
188 
189  // Use current values if we don't have historical data
190  // Right now, this will happen if we get first frame after prediction failure,
191  // and this exposure wasn't in the buffer. (TBD: Unlikely.. unless IMU message wasn't sent?)
192  if (LastVisionExposureRecord.ExposureCounter != exposureCounter)
194 
195  // ********* CameraFromImu *********
196 
197  // This is stored in the camera frame, so need to be careful when combining with the IMU data,
198  // which is in the world frame
199 
200  Transformd cameraFromImuPrev = CameraFromImu.Pose;
201  CameraFromImu.Pose = cameraFromImu;
203 
204  // Check LastVisionExposureRecord.Delta.TimeInSeconds to avoid divide by zero, which we could (rarely)
205  // get if we didn't have exposures delta for history (skipped exposure counters
206  // due to video mode change that stalls USB, etc).
208  {
209  Vector3d visionVelocityInImuFrame = (cameraFromImu.Translation - cameraFromImuPrev.Translation) /
210  LastVisionExposureRecord.ImuOnlyDelta.TimeInSeconds;
211  // Use the accel data to estimate the velocity at the exposure time
212  // (as opposed to the average velocity between exposures)
215  CameraFromImu.LinearVelocity = visionVelocityInImuFrame +
216  WorldFromCamera.Inverted().Rotate(imuVelocityInWorldFrame);
217  }
218  else
219  {
220  CameraFromImu.LinearVelocity = Vector3d(0, 0, 0);
221  }
222 }
223 
224 PoseStated SensorFusion::computeVisionError()
225 {
226  PoseStated worldFromImuVision = WorldFromCamera * CameraFromImu;
227  // Here we need to compute the difference between worldFromImuVision and WorldFromImu.
228  // However this difference needs to be represented in the World frame, not IMU frame.
229  // Therefore the computation is different from simply worldFromImuVision.Pose * WorldFromImu.Pose.Inverted().
230  PoseStated err;
231  err.Pose.Rotation = worldFromImuVision.Pose.Rotation *
232  WorldFromImu.Pose.Rotation.Inverted();
233  err.Pose.Translation = worldFromImuVision.Pose.Translation -
234  WorldFromImu.Pose.Translation;
235  err.LinearVelocity = worldFromImuVision.LinearVelocity -
236  WorldFromImu.LinearVelocity;
237  return err;
238 }
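Written out, with R_v, t_v the rotation and translation of worldFromImuVision and R_s, t_s those of WorldFromImu: the error is defined so that applying it on the left, i.e. in the world frame, recovers the vision estimate: err.Rotation = R_v * R_s^-1 (so err.Rotation * R_s = R_v) and err.Translation = t_v - t_s. The naive composition worldFromImuVision * WorldFromImu.Inverted() would instead yield a translation of t_v - R_v * R_s^-1 * t_s, folding the rotation error into the position error.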
239 
240 Transform<double> SensorFusion::GetVisionPrediction(UInt32 exposureCounter)
241 {
242  Lock::Locker lockScope(pHandler->GetHandlerLock());
243 
244  // Combine the small deltas together
245  // Should only be one iteration, unless we are skipping camera frames
246  ExposureRecord record;
247  PoseStated delta;
248 
249  while (!ExposureRecordHistory.IsEmpty() &&
250  (ExposureRecordHistory.PeekFront().ExposureCounter <= exposureCounter))
251  {
252  record = ExposureRecordHistory.PopFront();
253  delta.AdvanceByDelta(record.ImuOnlyDelta);
254  }
255  // Put the combined exposure record back in the history, for use in OnVisionSuccess(...)
256  record.ImuOnlyDelta = delta;
257  ExposureRecordHistory.PushFront(record);
258 
259  Transformd result;
260  if (record.VisionTrackingAvailable)
261  {
262  // if the tracking is working normally, use the change in the main state (SFusion output)
263  // to compute the prediction
264  result = CameraFromImu.Pose *
265  LastVisionExposureRecord.WorldFromImu.Pose.Inverted() * record.WorldFromImu.Pose;
266  }
267  else
268  {
269  // if we just acquired vision, the main state probably doesn't have the correct position,
270  // so can't rely on it for prediction
271 
272  // solution: use the accelerometer and vision velocity to propagate the previous sample forward
273  // (don't forget to transform IMU to the camera frame)
274  result = Transform<double>
275  (
279  );
280  }
281 
282  return result;
283 }
284 
285 void SensorFusion::handleMessage(const MessageBodyFrame& msg)
286 {
287  if (msg.Type != Message_BodyFrame || !IsMotionTrackingEnabled())
288  return;
289 
290  // Put the sensor readings into convenient local variables
291  Vector3d gyro(msg.RotationRate);
292  Vector3d accel(msg.Acceleration);
293  Vector3d mag(msg.MagneticField);
294  double DeltaT = msg.TimeDelta;
295 
296  // Keep track of time
297  WorldFromImu.TimeInSeconds = msg.AbsoluteTimeSeconds;
298  // We got an update in the last 70ms and the data is not very old
299  bool visionIsRecent = (GetTime() - LastVisionAbsoluteTime < 0.07) && (GetVisionLatency() < 0.25);
300  Stage++;
301 
302  // Insert current sensor data into filter history
303  FAngV.PushBack(gyro);
304  FAccelInImuFrame.Update(accel, DeltaT, Quatd(gyro, gyro.Length() * DeltaT));
305 
306  // Process raw inputs
307  // in the future the gravity offset can be calibrated using vision feedback
308  Vector3d accelInWorldFrame = WorldFromImu.Pose.Rotate(accel) - Vector3d(0, 9.8, 0);
309 
310  // Recompute the vision error to account for all the corrections and the new data
311  VisionError = computeVisionError();
312 
313  // Update headset orientation
314  WorldFromImu.StoreAndIntegrateGyro(gyro, DeltaT);
315  // Tilt correction based on accelerometer
316  if (EnableGravity)
317  applyTiltCorrection(DeltaT);
318  // Yaw correction based on camera
319  if (EnableYawCorrection && visionIsRecent)
320  applyVisionYawCorrection(DeltaT);
321  // Yaw correction based on magnetometer
322  if (EnableYawCorrection && MagCalibrated) // MagCalibrated is always false for DK2 for now
323  applyMagYawCorrection(mag, DeltaT);
324  // Focus Correction
325  if ((FocusDirection.x != 0.0f || FocusDirection.z != 0.0f) && FocusFOV < Mathf::Pi)
326  applyFocusCorrection(DeltaT);
327 
328  // Update camera orientation
329  if (EnableCameraTiltCorrection && visionIsRecent)
330  applyCameraTiltCorrection(accel, DeltaT);
331 
332  // The quaternion magnitude may slowly drift due to numerical error,
333  // so it is periodically normalized.
334  if ((Stage & 0xFF) == 0)
335  {
336  WorldFromImu.Pose.Rotation.Normalize();
337  WorldFromCamera.Rotation.Normalize();
338  }
339 
340  // Update headset position
341  if (VisionPositionEnabled && visionIsRecent)
342  {
343  // Integrate IMU acceleration and velocity here up to a fixed amount of time after vision.
344  WorldFromImu.StoreAndIntegrateAccelerometer(accelInWorldFrame + AccelOffset, DeltaT);
345  // Position correction based on camera
346  applyPositionCorrection(DeltaT);
347  // Compute where the neck pivot would be.
348  setNeckPivotFromPose(WorldFromImu.Pose);
349  }
350  else
351  {
352  // Fall back onto internal head model
353  // Use the last-known neck pivot position to figure out the expected IMU position.
354  // (should be the opposite of SensorFusion::setNeckPivotFromPose)
357 
358  // We can't trust velocity past this point.
359  WorldFromImu.LinearVelocity = Vector3d(0, 0, 0);
360  WorldFromImu.LinearAcceleration = accelInWorldFrame;
361  }
362 
363  // Compute the angular acceleration
364  WorldFromImu.AngularAcceleration = (FAngV.GetSize() >= 12 && DeltaT > 0) ?
365  (FAngV.SavitzkyGolayDerivative12() / DeltaT) : Vector3d();
366 
367  // Update the dead reckoning state used for incremental vision tracking
372 
374  Recording::GetRecorder().LogData("sfStage", (double)Stage);
376  //Recorder::LogData("sfAngAcc", State.AngularAcceleration);
377  //Recorder::LogData("sfAngVel", State.AngularVelocity);
378  //Recorder::LogData("sfLinAcc", State.LinearAcceleration);
379  //Recorder::LogData("sfLinVel", State.LinearVelocity);
380 
381  // Store the lockless state.
382  LocklessState lstate;
383  lstate.StatusFlags = Status_OrientationTracked;
384  if (VisionPositionEnabled)
385  lstate.StatusFlags |= Status_PositionConnected;
386  if (VisionPositionEnabled && visionIsRecent)
387  lstate.StatusFlags |= Status_PositionTracked;
388 
389  //A convenient means to temporarily extract this flag
390  TPH_IsPositionTracked = visionIsRecent;
391 
392  lstate.State = WorldFromImu;
393  lstate.Temperature = msg.Temperature;
394  lstate.Magnetometer = mag;
395  UpdatedState.SetState(lstate);
396 }
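The body above is, at heart, a complementary filter: dead-reckon with the gyro, then blend in small world-frame corrections from the slower but drift-free sources. A stripped-down sketch of that skeleton (illustrative only, assuming the OVR_Math quaternion and vector semantics used throughout this file; the gain is a placeholder):

    static OVR::Quatd integrateOneSample(OVR::Quatd orientation,
                                         OVR::Vector3d gyro, OVR::Vector3d accel,
                                         double dt, double tiltGain = 0.25)
    {
        using namespace OVR;

        // 1. Dead-reckon orientation from angular velocity (axis = gyro, angle = |gyro| * dt).
        if (gyro.LengthSq() > 0)
            orientation = orientation * Quatd(gyro, gyro.Length() * dt);

        // 2. Proportional tilt correction: rotate the measured gravity into the world frame
        //    and nudge the estimate so that it lines up with world-up.
        Vector3d up(0, 1, 0);
        Vector3d accelInWorld = orientation.Rotate(accel);
        Vector3d axis = accelInWorld.Cross(up);
        if (axis.LengthSq() > 0)
            orientation = Quatd(axis, accelInWorld.Angle(up) * tiltGain * dt) * orientation;

        return orientation;
    }

handleMessage() layers the vision yaw, magnetometer yaw, camera tilt, focus and position corrections on top of this same pattern, each with its own gain and gating condition.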
397 
398 void SensorFusion::handleExposure(const MessageExposureFrame& msg)
399 {
405 
406  // Every new exposure starts from zero
409 }
410 
411 // If you have a known-good pose, this sets the neck pivot position.
412 void SensorFusion::setNeckPivotFromPose(Transformd const& worldFromImu)
413 {
414  WorldFromNeck = worldFromImu * ImuFromCpf * CpfFromNeck;
415 }
416 
417 // These two functions need to be moved into Quat class
418 // Compute a rotation required to transform "from" into "to".
419 Quatd vectorAlignmentRotation(const Vector3d& from, const Vector3d& to)
420 {
421  Vector3d axis = from.Cross(to);
422  if (axis.LengthSq() == 0)
423  // this handles both collinear and zero-length input cases
424  return Quatd();
425  double angle = from.Angle(to);
426  return Quatd(axis, angle);
427 }
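For example (values made up for illustration), aligning a measured gravity direction with world-up yields exactly the tilt-error quaternion that applyTiltCorrection() blends in further down:

    static void vectorAlignmentExample()
    {
        using namespace OVR;
        Vector3d measuredUp(0.1, 9.7, 0.3);   // raw accelerometer reading, roughly world-up
        Quatd tiltError = vectorAlignmentRotation(measuredUp, Vector3d(0, 1, 0));
        // tiltError.Rotate(measuredUp) now points along +Y (up to numerical error).
        (void)tiltError;
    }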
428 
429 // Compute the part of the quaternion that rotates around Y axis
430 Quatd extractYawRotation(const Quatd& error)
431 {
432  if (error.y == 0)
433  return Quatd();
434  double phi = atan2(error.w, error.y);
435  double alpha = Mathd::Pi - 2 * phi;
436  return Quatd(Axis_Y, alpha);
437 }
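A quick sanity check of the formula above: a pure yaw of alpha about the Y axis is the quaternion (w, x, y, z) = (cos(alpha/2), 0, sin(alpha/2), 0), so alpha = 2 * atan2(y, w). The code computes phi = atan2(w, y) and returns alpha = Pi - 2 * phi; for w > 0, which holds for any small error quaternion, atan2(w, y) = Pi/2 - atan2(y, w), so the two expressions agree. Dropping the x and z components keeps only the heading part of the error, which is exactly what the yaw-correction passes need.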
438 
439 void SensorFusion::applyPositionCorrection(double deltaT)
440 {
441  // Each component of gainPos is equivalent to a Kalman gain of (sigma_process / sigma_observation)
442  const Vector3d gainPos = Vector3d(10, 10, 8);
443  const Vector3d gainVel = gainPos.EntrywiseMultiply(gainPos) * 0.5;
444  const Vector3d gainAccel = gainVel * 0.5;
445  const double snapThreshold = 0.1; // Large value (previously 0.01, which caused frequent jumping)
446 
447  Vector3d correctionPos, correctionVel;
448  if (VisionError.Pose.Translation.LengthSq() > (snapThreshold * snapThreshold) ||
449  !(UpdatedState.GetState().StatusFlags & Status_PositionTracked))
450  {
451  // high error or just reacquired position from vision - apply full correction
452 
453  // to know where we are right now, take the vision pose (which is slightly old)
454  // and update it using the imu data since then
455  PoseStated worldFromImuVision = WorldFromCamera * CameraFromImu;
456  for (unsigned int i = 0; i < ExposureRecordHistory.GetSize(); i++)
457  worldFromImuVision.AdvanceByDelta(ExposureRecordHistory.PeekFront(i).ImuOnlyDelta);
458  worldFromImuVision.AdvanceByDelta(NextExposureRecord.ImuOnlyDelta);
459 
460  correctionPos = worldFromImuVision.Pose.Translation - WorldFromImu.Pose.Translation;
461  correctionVel = worldFromImuVision.LinearVelocity - WorldFromImu.LinearVelocity;
462  AccelOffset = Vector3d();
463  }
464  else
465  {
466  correctionPos = VisionError.Pose.Translation.EntrywiseMultiply(gainPos) * deltaT;
467  correctionVel = VisionError.Pose.Translation.EntrywiseMultiply(gainVel) * deltaT;
468  AccelOffset += VisionError.Pose.Translation.EntrywiseMultiply(gainAccel) * deltaT;
469  }
470 
471  WorldFromImu.Pose.Translation += correctionPos;
472  WorldFromImu.LinearVelocity += correctionVel;
473 
474  // Update the exposure records so that we don't apply the same correction twice
475  LastVisionExposureRecord.WorldFromImu.Pose.Translation += correctionPos;
476  LastVisionExposureRecord.WorldFromImu.LinearVelocity += correctionVel;
477  for (unsigned int i = 0; i < ExposureRecordHistory.GetSize(); i++)
478  {
479  PoseStated& state = ExposureRecordHistory.PeekBack(i).WorldFromImu;
480  state.Pose.Translation += correctionPos;
481  state.LinearVelocity += correctionVel;
482  }
483 }
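To put numbers on the gains above: each IMU sample removes error * gain * deltaT, so a pure position offset decays roughly as exp(-gain * t), i.e. with a time constant of 1/gain (0.1 s horizontally at gain 10, 0.125 s vertically at gain 8). gainVel = gainPos^2 / 2 and gainAccel = gainVel / 2 follow the familiar alpha-beta(-gamma) pattern in which the velocity and bias gains are tied to the square of the position gain, so the three states converge together instead of fighting each other.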
484 
485 void SensorFusion::applyVisionYawCorrection(double deltaT)
486 {
487  const double gain = 0.25;
488  const double snapThreshold = 0.1;
489 
490  Quatd yawError = extractYawRotation(VisionError.Pose.Rotation);
491 
492  Quatd correction;
493  if (Alg::Abs(yawError.w) < cos(snapThreshold / 2)) // angle(yawError) > snapThreshold
494  // high error, jump to the vision position
495  correction = yawError;
496  else
497  correction = yawError.Nlerp(Quatd(), gain * deltaT);
498 
499  WorldFromImu.Pose.Rotation = correction * WorldFromImu.Pose.Rotation;
500 
501  // Update the exposure records so that we don't apply the same correction twice
502  LastVisionExposureRecord.WorldFromImu.Pose.Rotation = correction * LastVisionExposureRecord.WorldFromImu.Pose.Rotation;
503  for (unsigned int i = 0; i < ExposureRecordHistory.GetSize(); i++)
504  {
505  PoseStated& state = ExposureRecordHistory.PeekBack(i).WorldFromImu;
506  state.Pose.Rotation = correction * state.Pose.Rotation;
507  }
508 }
509 
510 void SensorFusion::applyMagYawCorrection(Vector3d mag, double deltaT)
511 {
512  const double minMagLengthSq = Mathd::Tolerance; // need to use a real value to discard very weak fields
513  const double maxMagRefDist = 0.1;
514  const double maxTiltError = 0.05;
515  const double proportionalGain = 0.01;
516  const double integralGain = 0.0005;
517 
518  Vector3d magInWorldFrame = WorldFromImu.Pose.Rotate(mag);
519  // verify that the horizontal component is sufficient
520  if (magInWorldFrame.x * magInWorldFrame.x + magInWorldFrame.z * magInWorldFrame.z < minMagLengthSq)
521  return;
522  magInWorldFrame.Normalize();
523 
524  // Delete a bad point
525  if (MagRefIdx >= 0 && MagRefs[MagRefIdx].Score < 0)
526  {
527  MagRefs.RemoveAtUnordered(MagRefIdx);
528  MagRefIdx = -1;
529  }
530 
531  // Update the reference point if needed
532  if (MagRefIdx < 0 || mag.Distance(MagRefs[MagRefIdx].InImuFrame) > maxMagRefDist)
533  {
534  // Find a new one
535  MagRefIdx = -1;
536  double bestDist = maxMagRefDist;
537  for (unsigned int i = 0; i < MagRefs.GetSize(); i++)
538  {
539  double dist = mag.Distance(MagRefs[i].InImuFrame);
540  if (bestDist > dist)
541  {
542  bestDist = dist;
543  MagRefIdx = i;
544  }
545  }
546 
547  // Create one if needed
548  if (MagRefIdx < 0 && MagRefs.GetSize() < MagMaxReferences)
549  {
550  MagRefs.PushBack(MagReferencePoint(mag, WorldFromImu.Pose, 1000));
551  }
552  }
553 
554  if (MagRefIdx >= 0)
555  {
556  Vector3d magRefInWorldFrame = MagRefs[MagRefIdx].WorldFromImu.Rotate(MagRefs[MagRefIdx].InImuFrame);
557  magRefInWorldFrame.Normalize();
558 
559  // If the vertical angle is wrong, decrease the score and do nothing
560  if (Alg::Abs(magRefInWorldFrame.y - magInWorldFrame.y) > maxTiltError)
561  {
562  MagRefs[MagRefIdx].Score -= 1;
563  return;
564  }
565 
566  MagRefs[MagRefIdx].Score += 2;
567 #if 0
568  // this doesn't seem to work properly, need to investigate
569  Quatd error = vectorAlignmentRotation(magW, magRefW);
570  Quatd yawError = extractYawRotation(error);
571 #else
572  // Correction is computed in the horizontal plane
573  magInWorldFrame.y = magRefInWorldFrame.y = 0;
574  Quatd yawError = vectorAlignmentRotation(magInWorldFrame, magRefInWorldFrame);
575 #endif
576  Quatd correction = yawError.Nlerp(Quatd(), proportionalGain * deltaT) *
577  MagCorrectionIntegralTerm;
578  MagCorrectionIntegralTerm = MagCorrectionIntegralTerm * yawError.Nlerp(Quatd(), integralGain * deltaT);
579 
580  WorldFromImu.Pose.Rotation = correction * WorldFromImu.Pose.Rotation;
581  }
582 }
583 
584 void SensorFusion::applyTiltCorrection(double deltaT)
585 {
586  const double gain = 0.25;
587  const double snapThreshold = 0.1;
588  const Vector3d up(0, 1, 0);
589 
591  Quatd error = vectorAlignmentRotation(accelInWorldFrame, up);
592 
593  Quatd correction;
594  if (FAccelInImuFrame.GetSize() == 1 ||
595  ((Alg::Abs(error.w) < cos(snapThreshold / 2) && FAccelInImuFrame.Confidence() > 0.75)))
596  // full correction for start-up
597  // or large error with high confidence
598  correction = error;
599  else if (FAccelInImuFrame.Confidence() > 0.5)
600  correction = error.Nlerp(Quatd(), gain * deltaT);
601  else
602  // accelerometer is unreliable due to movement
603  return;
604 
605  WorldFromImu.Pose.Rotation = correction * WorldFromImu.Pose.Rotation;
606 }
607 
608 void SensorFusion::applyCameraTiltCorrection(Vector3d accel, double deltaT)
609 {
610  const double snapThreshold = 0.02; // in radians
611  const double maxCameraPositionOffset = 0.2;
612  const Vector3d up(0, 1, 0), forward(0, 0, -1);
613 
614  // for startup use filtered value instead of instantaneous for stability
617 
618  Transformd cameraFromImu = WorldFromCamera.Inverted() * WorldFromImu.Pose;
619  // this is what the hypothetical camera-mounted accelerometer would show
620  Vector3d accelInCameraFrame = cameraFromImu.Rotate(accel);
621  FAccelInCameraFrame.Update(accelInCameraFrame, deltaT);
622  Vector3d cameraAccelInWorldFrame = WorldFromCamera.Rotate(FAccelInCameraFrame.GetFilteredValue());
623 
624  Quatd error1 = vectorAlignmentRotation(cameraAccelInWorldFrame, up);
625  // cancel out yaw rotation
626  Vector3d forwardCamera = (error1 * WorldFromCamera.Rotation).Rotate(forward);
627  forwardCamera.y = 0;
628  Quatd error2 = vectorAlignmentRotation(forwardCamera, forward);
629  // combined error
630  Quatd error = error2 * error1;
631 
632  double confidence = FAccelInCameraFrame.Confidence();
633  // penalize the confidence if looking away from the camera
634  // TODO: smooth fall-off
635  if (CameraFromImu.Pose.Rotate(forward).Angle(forward) > 1)
636  confidence *= 0.5;
637 
638  //Convenient global variable to temporarily extract this data.
639  TPH_CameraPoseConfidence = confidence;
640  //Allow override of confidence threshold
641  double confidenceThreshold = 0.75f;
642  if (TPH_CameraPoseConfidenceThresholdOverrideIfNonZero)
643  {
644  confidenceThreshold = TPH_CameraPoseConfidenceThresholdOverrideIfNonZero;
645  }
646 
647  Quatd correction;
648  if (FAccelInCameraFrame.GetSize() == 1 ||
649  confidence > WorldFromCameraConfidence + 0.2 ||
650  // disabled due to false positives when moving side to side
651 // (Alg::Abs(error.w) < cos(5 * snapThreshold / 2) && confidence > 0.55) ||
652  (Alg::Abs(error.w) < cos(snapThreshold / 2) && confidence > confidenceThreshold))
653  {
654  // large error with high confidence
655  correction = error;
656  // update the confidence level
657  WorldFromCameraConfidence = confidence;
658  }
659  else
660  {
661  // accelerometer is unreliable due to movement
662  return;
663  }
664 
665  Transformd newWorldFromCamera(correction * WorldFromCamera.Rotation, Vector3d());
666 
667  // compute a camera position change that together with the camera rotation would result in zero player movement
668  newWorldFromCamera.Translation += (WorldFromCamera * CameraFromImu.Pose).Translation -
669  (newWorldFromCamera * CameraFromImu.Pose).Translation;
670  // if the new position is too far, reset to default
671  // (can't hide the rotation, might as well use it to reset the position)
672  if (newWorldFromCamera.Translation.DistanceSq(DefaultWorldFromCamera.Translation) > maxCameraPositionOffset * maxCameraPositionOffset)
673  newWorldFromCamera.Translation = DefaultWorldFromCamera.Translation;
674 
675  WorldFromCamera = newWorldFromCamera;
676 
677  //Convenient global variable to temporarily extract this data.
678  TPH_CameraPoseOrientationWxyz[0] = (float)WorldFromCamera.Rotation.w;
679  TPH_CameraPoseOrientationWxyz[1] = (float)WorldFromCamera.Rotation.x;
680  TPH_CameraPoseOrientationWxyz[2] = (float)WorldFromCamera.Rotation.y;
681  TPH_CameraPoseOrientationWxyz[3] = (float)WorldFromCamera.Rotation.z;
682 }
683 
684 void SensorFusion::applyFocusCorrection(double deltaT)
685 {
686  Vector3d up = Vector3d(0, 1, 0);
687  double gain = 0.01;
688  Vector3d currentDir = WorldFromImu.Pose.Rotate(Vector3d(0, 0, 1));
689 
690  Vector3d focusYawComponent = FocusDirection.ProjectToPlane(up);
691  Vector3d currentYawComponent = currentDir.ProjectToPlane(up);
692 
693  double angle = focusYawComponent.Angle(currentYawComponent);
694 
695  if( angle > FocusFOV )
696  {
697  Quatd yawError;
698  if ( FocusFOV != 0.0f)
699  {
700  Vector3d lFocus = Quatd(up, -FocusFOV).Rotate(focusYawComponent);
701  Vector3d rFocus = Quatd(up, FocusFOV).Rotate(focusYawComponent);
702  double lAngle = lFocus.Angle(currentYawComponent);
703  double rAngle = rFocus.Angle(currentYawComponent);
704  if(lAngle < rAngle)
705  {
706  yawError = vectorAlignmentRotation(currentDir, lFocus);
707  }
708  else
709  {
710  yawError = vectorAlignmentRotation(currentDir, rFocus);
711  }
712  }
713  else
714  {
715  yawError = vectorAlignmentRotation(currentYawComponent, focusYawComponent);
716  }
717 
718  Quatd correction = yawError.Nlerp(Quatd(), gain * deltaT);
719  WorldFromImu.Pose.Rotation = correction * WorldFromImu.Pose.Rotation;
720  }
721 }
722 
723 //------------------------------------------------------------------------------------
724 // Focus filter setting functions
725 
726 void SensorFusion::SetFocusDirection()
727 {
728  SetFocusDirection(WorldFromImu.Pose.Rotate(Vector3d(0.0, 0.0, -1.0)));
729 }
730 
731 void SensorFusion::SetFocusDirection(Vector3d direction)
732 {
733  FocusDirection = direction;
734 }
735 
736 void SensorFusion::SetFocusFOV(double fov)
737 {
738  OVR_ASSERT(fov >= 0.0);
739  FocusFOV = fov;
740 }
741 
742 void SensorFusion::ClearFocus()
743 {
744  FocusDirection = Vector3d(0.0, 0.0, 0.0);
745  FocusFOV = 0.0f;
746 }
747 
748 //-------------------------------------------------------------------------------------
749 // Head model functions.
750 
751 // Sets up head-and-neck model and device-to-pupil dimensions from the user's profile.
752 void SensorFusion::SetUserHeadDimensions(Profile const &profile, HmdRenderInfo const &hmdRenderInfo)
753 {
754  float neckeye[2];
755  int count = profile.GetFloatValues(OVR_KEY_NECK_TO_EYE_DISTANCE, neckeye, 2);
756  // Make sure these are vaguely sensible values.
757  if (count == 2)
758  {
759  OVR_ASSERT ( ( neckeye[0] > 0.05f ) && ( neckeye[0] < 0.5f ) );
760  OVR_ASSERT ( ( neckeye[1] > 0.05f ) && ( neckeye[1] < 0.5f ) );
761  SetHeadModel ( Vector3f ( 0.0, neckeye[1], -neckeye[0] ) );
762  }
763 
764  // Find the distance from the center of the screen to the "center eye"
765  // This center eye is used by systems like rendering & audio to represent the player,
766  // and they will handle the offsets needed from there to each actual eye.
767 
768  // HACK HACK HACK
769  // We know for DK1 the screen->lens surface distance is roughly 0.049f, and that the faceplate->lens is 0.02357f.
770  // We're going to assume(!!!!) that all HMDs have the same screen->faceplate distance.
771  // Crystal Cove was measured to be roughly 0.025 screen->faceplate which agrees with this assumption.
772  // TODO: do this properly! Update: Measured this at 0.02733 with a CC prototype, CES era (PT7), on 2/19/14 -Steve
773  float screenCenterToMidplate = 0.02733f;
774  float centerEyeRelief = hmdRenderInfo.GetEyeCenter().ReliefInMeters;
775  float centerPupilDepth = screenCenterToMidplate + hmdRenderInfo.LensSurfaceToMidplateInMeters + centerEyeRelief;
776  SetCenterPupilDepth ( centerPupilDepth );
777 
779 }
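Plugging in representative numbers (the eye relief value is an assumption for illustration): a screenCenterToMidplate of 0.02733 m plus a LensSurfaceToMidplateInMeters of 0.02357 m (the DK1 faceplate-to-lens figure quoted above) plus a centerEyeRelief of about 0.010 m gives a centerPupilDepth of roughly 0.061 m, i.e. the center pupil sits about 6 cm in front of the screen center.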
780 
781 Vector3f SensorFusion::GetHeadModel() const
782 {
783  return (Vector3f)CpfFromNeck.Inverted().Translation;
784 }
785 
786 void SensorFusion::SetHeadModel(const Vector3f &headModel, bool resetNeckPivot /*= true*/ )
787 {
788  Lock::Locker lockScope(pHandler->GetHandlerLock());
789  // The head model should look something like (0, 0.12, -0.12), so
790  // these asserts are to try to prevent sign problems, as
791  // they can be subtle but nauseating!
792  OVR_ASSERT ( headModel.y > 0.0f );
793  OVR_ASSERT ( headModel.z < 0.0f );
794  CpfFromNeck = Transformd(Quatd(), (Vector3d)headModel).Inverted();
795  if ( resetNeckPivot )
796  {
797  setNeckPivotFromPose(WorldFromImu.Pose);
798  }
799 }
800 
801 float SensorFusion::GetCenterPupilDepth() const
802 {
803  return CenterPupilDepth;
804 }
805 
806 void SensorFusion::SetCenterPupilDepth(float centerPupilDepth)
807 {
808  CenterPupilDepth = centerPupilDepth;
809 
810  Transformd screenFromCpf(Quatd(), Vector3d(0, 0, centerPupilDepth));
811  ImuFromCpf = ImuFromScreen * screenFromCpf;
812 
813  Recording::GetRecorder().RecordUserParams(GetHeadModel(), GetCenterPupilDepth());
814 }
815 
816 //-------------------------------------------------------------------------------------
817 
818 // This is a "perceptually tuned predictive filter", which means that it is optimized
819 // for improvements in the VR experience, rather than pure error. In particular,
820 // jitter is more perceptible at lower speeds whereas latency is more perceptible
821 // after a high-speed motion. Therefore, the prediction interval is dynamically
822 // adjusted based on speed. Significantly more research is needed to further improve
823 // this family of filters.
824 static Transform<double> calcPredictedPose(const PoseState<double>& poseState, double predictionDt)
825 {
826  Transform<double> pose = poseState.Pose;
827  const double linearCoef = 1.0;
828  Vector3d angularVelocity = poseState.AngularVelocity;
829  double angularSpeed = angularVelocity.Length();
830 
831  // This could be tuned so that linear and angular are combined with different coefficients
832  double speed = angularSpeed + linearCoef * poseState.LinearVelocity.Length();
833 
834  const double slope = 0.2; // The rate at which the dynamic prediction interval varies
835  double candidateDt = slope * speed; // TODO: Replace with smoothstep function
836 
837  double dynamicDt = predictionDt;
838 
839  // Choose the candidate if it is shorter, to improve stability
840  if (candidateDt < predictionDt)
841  dynamicDt = candidateDt;
842 
843  if (angularSpeed > 0.001)
844  pose.Rotation = pose.Rotation * Quatd(angularVelocity, angularSpeed * dynamicDt);
845 
846  pose.Translation += poseState.LinearVelocity * dynamicDt;
847 
848  return pose;
849 }
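Concretely, with slope = 0.2: a head turning at 0.1 rad/s yields candidateDt = 0.02 s, so a caller asking for 40 ms of prediction only gets 20 ms, which keeps jitter down while the head is nearly still; at 2 rad/s candidateDt = 0.4 s exceeds the request, so the full 40 ms is used and latency stays hidden during fast motion.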
850 
851 
852 Transformf SensorFusion::GetPoseAtTime(double absoluteTime) const
853 {
854  SensorState ss = GetSensorStateAtTime ( absoluteTime );
855  return ss.Predicted.Pose;
856 }
857 
858 
859 SensorState SensorFusion::GetSensorStateAtTime(double absoluteTime) const
860 {
861  const LocklessState lstate = UpdatedState.GetState();
862  // Delta time from the last available data
863  const double pdt = absoluteTime - lstate.State.TimeInSeconds;
864 
865  SensorState ss;
866  ss.Recorded = PoseStatef(lstate.State);
867  ss.Temperature = lstate.Temperature;
868  ss.Magnetometer = Vector3f(lstate.Magnetometer);
869  ss.StatusFlags = lstate.StatusFlags;
870 
871  ss.Predicted = ss.Recorded;
872  ss.Predicted.TimeInSeconds = absoluteTime;
873 
874  // Do prediction logic and ImuFromCpf transformation
875  ss.Recorded.Pose = Transformf(lstate.State.Pose * ImuFromCpf);
876  ss.Predicted.Pose = Transformf(calcPredictedPose(lstate.State, pdt) * ImuFromCpf);
877  return ss;
878 }
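A usage sketch for the accessors above (an assumption, not part of this file; scanoutTime stands in for whenever the application expects the frame to reach the display):

    void exampleReadPose(const OVR::SensorFusion& fusion, double scanoutTime)
    {
        using namespace OVR;
        SensorState state = fusion.GetSensorStateAtTime(scanoutTime);

        Quatf    orientation = state.Predicted.Pose.Rotation;     // predicted CPF orientation
        Vector3f position    = state.Predicted.Pose.Translation;  // predicted CPF position, meters
        bool     positionTracked = (state.StatusFlags & Status_PositionTracked) != 0;

        (void)orientation; (void)position; (void)positionTracked;
    }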
879 
880 unsigned SensorFusion::GetStatus() const
881 {
882  return UpdatedState.GetState().StatusFlags;
883 }
884 
885 //-------------------------------------------------------------------------------------
886 
887 void SensorFusion::OnMessage(const MessageBodyFrame& msg)
888 {
889  OVR_ASSERT(!IsAttachedToSensor());
890  handleMessage(msg);
891 }
892 
893 //-------------------------------------------------------------------------------------
894 
895 void SensorFusion::BodyFrameHandler::OnMessage(const Message& msg)
896 {
897  Recording::GetRecorder().RecordMessage(msg);
898  if (msg.Type == Message_BodyFrame)
899  pFusion->handleMessage(static_cast<const MessageBodyFrame&>(msg));
900  if (msg.Type == Message_ExposureFrame)
901  pFusion->handleExposure(static_cast<const MessageExposureFrame&>(msg));
902 }
903 
904 } // namespace OVR