Monado OpenXR Runtime
Todo List
Global _calculate_squeeze_value (struct survive_device *survive)
find a good formula for squeeze value
Global _process_hmd_button_event (struct survive_device *survive, const struct SurviveSimpleButtonEvent *e)
engagement changed
Global add_connected_devices (struct survive_system *ss)
We don't know how many device added events we will get. After 25ms Index HMD + Controllers are added here. So 250ms should be a safe value. Device added just means libsurvive knows the usb devices, the config will then be loaded asynchronously.
Global android_custom_surface_get_display_metrics (struct _JavaVM *vm, void *context, struct xrt_android_display_metrics *out_metrics)
implement non-deprecated codepath for api 30+
Global android_device_get_tracked_pose (struct xrt_device *xdev, enum xrt_input_name name, uint64_t at_timestamp_ns, struct xrt_space_relation *out_relation)
assuming that orientation is actually currently tracked.
Global android_globals
Do we need locking here?
Global arduino_get_fusion_pose (struct arduino_device *ad, enum xrt_input_name name, struct xrt_space_relation *out_relation)
assuming that orientation is actually currently tracked.
Global calc_pose_data (struct comp_renderer *r, enum comp_target_fov_source fov_source, struct xrt_fov out_fovs[XRT_MAX_VIEWS], struct xrt_pose out_world[XRT_MAX_VIEWS], struct xrt_pose out_eye[XRT_MAX_VIEWS], uint32_t view_count)
get actual ipd_meters
Global CDeviceDriver_Monado::Activate (vr::TrackedDeviceIndex_t unObjectId)

: proper serial and model number

update when ipd changes

Global CDeviceDriver_Monado::CDeviceDriver_Monado (struct xrt_instance *xinst, struct xrt_device *xdev)

latency

get ipd user setting from monado session

more than 2 views

more versatile IPD calculation

Global CDeviceDriver_Monado::DebugRequest (const char *pchRequest, char *pchResponseBuffer, uint32_t unResponseBufferSize)
Global CDeviceDriver_Monado::GetPose ()

: Monado head model?

Global CDeviceDriver_Monado_Controller::Activate (vr::TrackedDeviceIndex_t unObjectId)
handle trackers etc
Global CDeviceDriver_Monado_Controller::AddMonadoInput (struct binding_template *b)
how to handle poses?
Global CDeviceDriver_Monado_Controller::AddOutputControl (enum xrt_output_name monado_output_name, const char *steamvr_control_path)
when there are multiple output types: XRT_GET_OUTPUT_TYPE(monado_output_name);
Global CDeviceDriver_Monado_Controller::GetPose ()
better method to find grip name
Global CDeviceDriver_Monado_Controller::PoseUpdateThreadFunction ()
figure out the best pose update rate
Global check_error (struct rs_source *rs, rs2_error *e, const char *file, int line)
Unify check_error() and DO() usage throughout the driver.
Global check_slam_capabilities (rs2_device_list *device_list, int dev_idx, bool *out_hslam, bool *out_dslam)
Consider adding the sensors list to the rs_container
Global client_d3d11_create_swapchain (struct xrt_compositor *xc, const struct xrt_swapchain_create_info *info, struct xrt_swapchain **out_xsc)
not sure - dedicated allocation
Global client_d3d11_swapchain_wait_image (struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index)
discard old contents?
Global client_d3d12_create_swapchain (struct xrt_compositor *xc, const struct xrt_swapchain_create_info *info, struct xrt_swapchain **out_xsc)
No idea if this is right, might depend on whether it's the compute or graphics compositor!
Global client_d3d12_swapchain_wait_image (struct xrt_swapchain *xsc, uint64_t timeout_ns, uint32_t index)
discard old contents?
Global client_egl_context_begin (struct xrt_compositor *xc, enum client_gl_context_reason reason)
Handle this better, don't just assume that the context is current.
Global client_egl_context_end (struct xrt_compositor *xc, enum client_gl_context_reason reason)
Handle this better, don't just assume that the context is current.
Global client_gl_eglimage_swapchain_create (struct xrt_compositor *xc, const struct xrt_swapchain_create_info *info, struct xrt_swapchain_native *xscn, struct client_gl_swapchain **out_sc)
this matches the behavior of the Google test, but is not itself tested or fully rationalized.
Global client_vk_swapchain_create (struct xrt_compositor *xc, const struct xrt_swapchain_create_info *info, struct xrt_swapchain **out_xsc)
less conservative pipeline stage masks based on usage
Global comp_main_create_system_compositor

Support more like, depth/float formats etc, remember to update the GL client as well.

: Query all supported refresh rates of the current mode

Global comp_mirror_do_blit (struct comp_mirror_to_debug_gui *m, struct vk_bundle *vk, uint64_t frame_id, uint64_t predicted_display_time_ns, VkImage from_image, VkImageView from_view, VkSampler from_sampler, VkExtent2D from_extent, struct xrt_normalized_rect from_rect)
Better handling of error?
Global comp_multi_create_system_compositor (struct xrt_compositor_native *xcn, struct u_pacing_app_factory *upaf, const struct xrt_system_compositor_info *xsci, bool do_warm_start, struct xrt_system_compositor **out_xsysc)
Make the clients not go from IDLE to READY before we have completed a first frame.
Global comp_render_cs_layer (struct render_compute *crc, uint32_t view_index, const struct comp_layer *layers, const uint32_t layer_count, const struct xrt_normalized_rect *pre_transform, const struct xrt_pose *world_pose, const struct xrt_pose *eye_pose, const VkImage target_image, const VkImageView target_image_view, const struct render_viewport_data *target_view, bool do_timewarp)
: If Vulkan 1.2, use VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT and skip this
Global comp_renderer_draw (struct comp_renderer *r)
This should be discard.
Global comp_target_swapchain_acquire_next_image (struct comp_target *ct, uint32_t *out_index)
what error to return here?
Global comp_window_direct_create_surface (struct comp_target_swapchain *cts, VkDisplayKHR display, uint32_t width, uint32_t height)
actually select the plane.
Global comp_window_direct_nvidia_init (struct comp_target *ct)
what if we have multiple allowlisted HMD displays connected?
Global comp_window_mswin_destroy (struct comp_target *ct)
Global comp_window_mswin_fullscreen (struct comp_window_mswin *w)
Global comp_window_mswin_init_swapchain (struct comp_target *ct, uint32_t width, uint32_t height)
Global comp_window_mswin_thread (struct comp_window_mswin *cwm)
icon
Global comp_window_mswin_update_window_title (struct comp_target *ct, const char *title)
Global compositor_get_display_refresh_rate (struct xrt_compositor *xc, float *out_display_refresh_rate_hz)
: Implement the method to change display refresh rate.
Global compositor_request_display_refresh_rate (struct xrt_compositor *xc, float display_refresh_rate_hz)
: Implement the method to change display refresh rate.
Global create_controller (ohmd_context *ctx, int device_idx, int device_flags, enum xrt_device_type device_type)
Generic tracker input profile?
Global create_ddev (struct rs_ddev *rs, int device_idx)
0 index hardcoded, check device with RS2_EXTENSION_POSE_SENSOR or similar instead
Global create_frame_with_format_of_size (struct xrt_frame *xf, uint32_t w, uint32_t h, enum xrt_format format, struct xrt_frame **out_frame)
Allocate from a pool of frames.
Global create_hmd (ohmd_context *ctx, int device_idx, int device_flags)
These values are most likely wrong, needs to be transposed and correct channel.
Global CServerDriver_Monado::Init (vr::IVRDriverContext *pDriverContext)

instance initialization is difficult to replicate

provide a serial number

Global d3d_vk_format_to_dxgi (int64_t format)

DXGI_FORMAT_D24_UNORM_S8_UINT ?

DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM ?

Global daydream_get_fusion_pose (struct daydream_device *daydream, enum xrt_input_name name, struct xrt_space_relation *out_relation)
assuming that orientation is actually currently tracked.
Global daydream_run_thread (void *ptr)
this should be injected at construction time
Global decide (xrt_vec3 one, xrt_vec3 two, bool *out)
This is flickery; investigate once we get better hand tracking
Global depthai_guess_ir_drivers (struct depthai_fs *depthai)
this function will look slightly different for an OAK-D Pro with dot projectors - mine only has floodlights
Global depthai_setup_stereo_grayscale_pipeline (struct depthai_fs *depthai)
This code will turn the exposure time down, but you may not want it. Or we may want to rework Monado's AEG code to control the IR floodlight brightness in concert with the exposure time. For now, disable.
Global do_post_create_vulkan_setup (struct vk_bundle *vk, const struct xrt_swapchain_create_info *info, struct comp_swapchain *sc)
actually out of memory
Global euroc_player_load_next_frame (struct euroc_player *ep, int cam_index, struct xrt_frame *&xf)
Not using xrt_stereo_format because we use two sinks. It would probably be better to refactor everything to use stereo frames instead.
Global FRAME_COUNT
The allocation code is not good, this is a work around for index reuse causing asserts, change the code so we don't need it at all.
Global get_binding (struct oxr_logger *log, struct oxr_sink_logger *slog, struct oxr_session *sess, const struct oxr_action_ref *act_ref, struct oxr_interaction_profile *profile, enum oxr_subaction_path subaction_path, struct oxr_action_input inputs[32], uint32_t *input_count, struct oxr_action_output outputs[32], uint32_t *output_count)
This probably falls on its head if the application doesn't use sub action paths.
Global get_info (ohmd_device *dev, const char *prod)
These are probably all wrong!
Global get_pose (struct vive_controller_device *d, enum xrt_input_name name, uint64_t at_timestamp_ns, struct xrt_space_relation *out_relation)
Vive poses only have index poses.
Global gstreamer_pipeline_stop (struct gstreamer_pipeline *gp)
Should check if we got an error message here or an eos.
Global hand_sim_hand_init (struct u_hand_sim_hand *out_opt, enum xrt_hand xhand, const struct xrt_space_relation *root_pose)
needed?
Global handle_device_name_msg (struct psvr_device *psvr, unsigned char *buffer, int size)
Get the name here.
Global handle_layer (struct xrt_compositor *xc, struct xrt_device *xdev, struct xrt_swapchain *xsc, const struct xrt_layer_data *data, enum xrt_layer_type type)
Real id.
Global handle_reference_space_change_pending (struct oxr_logger *log, struct oxr_session *sess, struct xrt_session_event_reference_space_change_pending *ref_change)
properly handle return (not done yet because requires larger rewrite),
Global hdk_device_update (struct hdk_device *hd)
might not be accurate on some version 1 reports??
Global hdk_found (struct xrt_prober *xp, struct xrt_prober_device **devices, size_t device_count, size_t index, cJSON *attached_data, struct xrt_device **out_xdev)
just assuming anything else is 1.3 for now
Global ht_async_get_hand (struct t_hand_tracking_async *ht_async, enum xrt_input_name name, uint64_t desired_timestamp_ns, struct xrt_hand_joint_set *out_value, uint64_t *out_timestamp_ns)
We could slightly reduce the total number of transforms by putting some of this in ht_async_mainloop
Global ht_device_create_common (struct t_stereo_camera_calibration *calib, bool own_xfctx, struct xrt_frame_context *xfctx, struct t_hand_tracking_sync *sync)
2 hands hardcoded
Global hydra_device_get_tracked_pose (struct xrt_device *xdev, enum xrt_input_name name, uint64_t at_timestamp_ns, struct xrt_space_relation *out_relation)
how do we report this is not (necessarily) the same base space as the HMD?
Global hydra_device_parse_controller (struct hydra_device *hd, uint8_t *buf)
the presence of this suggests we're not decoding the orientation right.
Global hydra_device_update_inputs (struct xrt_device *xdev)
report pose
Global hydra_read_int16_le (uint8_t **bufptr)
nothing actually defines XRT_BIG_ENDIAN when needed!
Global ipc_client_connection_fini (struct ipc_connection *ipc_c)
how to tear down the shared memory?
Global ipc_client_create_system_compositor (struct ipc_connection *ipc_c, struct xrt_image_native_allocator *xina, struct xrt_device *xdev, struct xrt_system_compositor **out_xcs)
remove this param?
Global ipc_compositor_layer_passthrough (struct xrt_compositor *xc, struct xrt_device *xdev, const struct xrt_layer_data *data)
Real id.
Global ipc_compositor_layer_projection (struct xrt_compositor *xc, struct xrt_device *xdev, struct xrt_swapchain *xsc[XRT_MAX_VIEWS], const struct xrt_layer_data *data)
Real id.
Global ipc_compositor_layer_projection_depth (struct xrt_compositor *xc, struct xrt_device *xdev, struct xrt_swapchain *xsc[XRT_MAX_VIEWS], struct xrt_swapchain *d_xsc[XRT_MAX_VIEWS], const struct xrt_layer_data *data)
Real id.
Global ipc_handle_session_begin (volatile struct ipc_client_state *ics)
Pass the view type down.
Global ipc_handle_swapchain_acquire_image (volatile struct ipc_client_state *ics, uint32_t id, uint32_t *out_index)
Look up the index.
Global ipc_handle_swapchain_release_image (volatile struct ipc_client_state *ics, uint32_t id, uint32_t index)
Look up the index.
Global ipc_handle_swapchain_wait_image (volatile struct ipc_client_state *ics, uint32_t id, uint64_t timeout_ns, uint32_t index)
Look up the index.
Global ipc_layer_entry::xdev_id
what is this used for?
Global ipc_server_handle_client_connected (struct ipc_server *vs, xrt_ipc_handle_t ipc_handle)
validate ID.
Global ipc_server_mainloop_deinit (struct ipc_server_mainloop *ml)

close pipe_write or epoll_fd?

close epoll_fd?

Global ipc_shared_memory::display
doesn't account for overfill for timewarp or distortion?
Global ipc_syscomp_destroy (struct xrt_system_compositor *xsc)
Implement
Global kb4_unproject (const struct t_camera_model_params *dist, const float x, const float y, float *out_x, float *out_y, float *out_z)
I'm not 100% sure if kb4 is always non-injective. basalt-headers always returns true here, so it might be wrong too.
Global leap_input_loop (void *ptr_to_xdev)
(Moses Turner) Could be using LeapController.now() to try to emulate our own pose prediction, but I ain't got time for that
Global locked_pool_wake_worker_if_allowed (struct pool *p)
Is this an error?
Global m_filter_euro_quat_init (struct m_filter_euro_quat *f, double fc_min, double fc_min_d, double beta)
fix order of args
Global m_relation_history_get (const struct m_relation_history *rh, uint64_t at_timestamp_ns, struct xrt_space_relation *out_relation)
Does interpolating the velocities make any sense?
Global multi_main_loop (struct multi_system_compositor *msc)

Pick the blend mode from primary client.

Pick a good display time.

Global MULTI_MAX_CLIENTS

Move to xrt_limits.h, or make dynamic to remove limit.

Global MULTI_MAX_LAYERS

Move to xrt_limits.h and share.

Global null_compositor::frame
Insert your own required members here
Global ogl_texture_target_for_swapchain_info (const struct xrt_swapchain_create_info *info, uint32_t *out_tex_target, uint32_t *out_tex_param_name)
test GL_TEXTURE_EXTERNAL_OES on Android
Global oh_device_create (ohmd_context *ctx, bool no_hmds, struct xrt_device **out_xdevs)
: support mix of 3dof and 6dof OpenHMD devices
Global oh_device_get_tracked_pose (struct xrt_device *xdev, enum xrt_input_name name, uint64_t at_timestamp_ns, struct xrt_space_relation *out_relation)

possibly hoist this out of the driver level, to provide as a common service?

this is a hack - should really get a timestamp on the USB data and use that instead.

assuming that orientation is actually currently tracked

adjust for latency here

Global oh_device_set_output (struct xrt_device *xdev, enum xrt_output_name name, const union xrt_output_value *value)
OpenHMD haptic API not finished
Global oxr_action_get_current_interaction_profile (struct oxr_logger *log, struct oxr_session *sess, XrPath topLevelUserPath, XrInteractionProfileState *interactionProfile)

This implementation is very very very inelegant.

: If we ever rebind a profile that has not been suggested by the client, it will not be found.

Global oxr_action_sync_data (struct oxr_logger *log, struct oxr_session *sess, uint32_t countActionSets, const XrActiveActionSet *actionSets)
can be listed more than once with different paths!
Global oxr_binding_find_bindings_from_key (struct oxr_logger *log, struct oxr_interaction_profile *p, uint32_t key, size_t max_bounding_count, struct oxr_binding **bindings, size_t *out_binding_count)
Should return total count instead of fixed max.
Global oxr_create_messenger (struct oxr_logger *, struct oxr_instance *inst, const XrDebugUtilsMessengerCreateInfoEXT *, struct oxr_debug_messenger **out_mssngr)
call into inst to create this instead?
Global oxr_d3d_get_requirements (struct oxr_logger *log, struct oxr_system *sys, LUID *adapter_luid, D3D_FEATURE_LEVEL *min_feature_level)
implement better?
Global oxr_input_transform_create_chain
This should be configured using knowledge from the device as well as user options/policy.
Global oxr_instance_create (struct oxr_logger *log, const XrInstanceCreateInfo *createInfo, const struct oxr_extension_status *extensions, struct oxr_instance **out_instance)
check if this (and other creates) failed?
Global oxr_session_begin (struct oxr_logger *log, struct oxr_session *sess, const XrSessionBeginInfo *beginInfo)
we only support a single view config type per system right now
Global oxr_session_frame_end (struct oxr_logger *log, struct oxr_session *sess, const XrFrameEndInfo *frameEndInfo)
Make integer print to string.
Global oxr_session_frame_wait (struct oxr_logger *log, struct oxr_session *sess, XrFrameState *frameState)
this should be carefully synchronized, because there may be more than one session per instance.
Global oxr_session_request_exit (struct oxr_logger *log, struct oxr_session *sess)
start fading out the app.
Global oxr_space_locate (struct oxr_logger *log, struct oxr_space *spc, struct oxr_space *baseSpc, XrTime time, XrSpaceLocation *location)
Implement.
Global oxr_system_enumerate_blend_modes (struct oxr_logger *log, struct oxr_system *sys, XrViewConfigurationType viewConfigurationType, uint32_t environmentBlendModeCapacityInput, uint32_t *environmentBlendModeCountOutput, XrEnvironmentBlendMode *environmentBlendModes)
Take into account viewConfigurationType
Global oxr_system_fill_in (struct oxr_logger *log, struct oxr_instance *inst, XrSystemId systemId, uint32_t view_count, struct oxr_system *sys)
handle other subaction paths?
Global oxr_verify_localized_name (struct oxr_logger *, const char *string, uint32_t array_size, const char *name)
validate well-formed UTF-8?
Global oxr_vk_create_vulkan_device (struct oxr_logger *log, struct oxr_system *sys, const XrVulkanDeviceCreateInfoKHR *createInfo, VkDevice *vulkanDevice, VkResult *vulkanResult)
: clarify in spec
Global oxr_vk_create_vulkan_instance (struct oxr_logger *log, struct oxr_system *sys, const XrVulkanInstanceCreateInfoKHR *createInfo, VkInstance *vulkanInstance, VkResult *vulkanResult)
: clarify in spec
Global oxr_xdev_get_hand_tracking_at (struct oxr_logger *log, struct oxr_instance *inst, struct xrt_device *xdev, enum xrt_input_name name, XrTime at_time, struct xrt_hand_joint_set *out_value)
Moses doesn't know what he's doing here! Convert at_time to monotonic and give to device.
Global oxr_xrConvertTimespecTimeToTimeKHR (XrInstance instance, const struct timespec *timespecTime, XrTime *time)
do we need to check and see if this extension was enabled first?
Global p_can_open (struct xrt_prober *xp, struct xrt_prober_device *xpdev)
add more backends
Global p_create_system (struct xrt_prober *xp, struct xrt_session_event_sink *broadcast, struct xrt_system_devices **out_xsysd, struct xrt_space_overseer **out_xso)
Improve estimation selection logic.
Global p_factory_ensure_slam_frameserver (struct p_factory *fact)

The check for (XRT_FEATURE_SLAM && XRT_BUILD_DRIVER_* && debug_flag_is_correct) is getting duplicated in: p_open_video_device, p_list_video_devices, and p_factory_ensure_slam_frameserver (here) with small differences. Incorrectly modifying one will mess the others.

Similar to p_factory_ensure_frameserver but for SLAM sources.

Global p_get_string_descriptor (struct xrt_prober *xp, struct xrt_prober_device *xpdev, enum xrt_prober_string which_string, unsigned char *buffer, size_t length)

add more backends

make this unicode (utf-16)? utf-8 would be better...

Global PINHOLE_RADTAN5
merge these with t_tracking.h
Global PREFERRED_VIT_SYSTEM_LIBRARY
Get preferred system from systems found at build time
Global psmv_device_create (struct xrt_prober *xp, struct xrt_prober_device *xpdev, struct xrt_tracked_psmv *tracker)

cleanup to not leak

measure!

Global psmv_get_fusion_pose (struct psmv_device *psmv, enum xrt_input_name name, timepoint_ns when, struct xrt_space_relation *out_relation)

This is hack, fusion reports angvel relative to the device but it needs to be in relation to the base space. Rotating it with the device orientation is enough to get it into the right space, angular velocity is a derivative so needs a special rotation.

assuming that orientation is actually currently tracked.

Global pssense_get_fusion_pose (struct pssense_device *pssense, enum xrt_input_name name, uint64_t at_timestamp_ns, struct xrt_space_relation *out_relation)
This is hack, fusion reports angvel relative to the device but it needs to be in relation to the base space. Rotating it with the device orientation is enough to get it into the right space, angular velocity is a derivative so needs a special rotation.
Global psvr_device_get_tracked_pose (struct xrt_device *xdev, enum xrt_input_name name, uint64_t at_timestamp_ns, struct xrt_space_relation *out_relation)
Move this to the tracker.
Global quirk_frame (struct xrt_frame_sink *xfs, struct xrt_frame *xf)
this is not thread safe, but right now nothing else has access to the frame (or should).
Global qwerty_controller_create (bool is_left, struct qwerty_hmd *qhmd)
: aim input not implemented
Global read_cv_mat (FILE *f, cv::Mat *m, const char *name)
We may have written things other than CV_32F and CV_64F.
Global receive_cam_frame (struct xrt_frame_sink *sink, struct xrt_frame *xf)
Update expgain independently for each camera like in WMR
Global receive_imu_sample (struct xrt_imu_sink *sink, struct xrt_imu_sample *s)
use 1000 if "average_imus" is false
Global receive_left_frame (struct xrt_frame_sink *sink, struct xrt_frame *)
Use one RS_LOG option for the entire driver
Global render_compute_layer_ubo_data::padding [3]
Implement separated samplers and images (and change to samplers[2])
Global required_vk_instance_extensions []
extension lists are duplicated as long strings in comp_vk_glue.c
Global rift_s_controller_get_fusion_pose (struct rift_s_controller *ctrl, enum xrt_input_name name, uint64_t at_timestamp_ns, struct xrt_space_relation *out_relation)
This is hack, fusion reports angvel relative to the device but it needs to be in relation to the base space. Rotating it with the device orientation is enough to get it into the right space, angular velocity is a derivative so needs a special rotation.
Global rift_s_tracker::slam
Right now, we are not consistent in how we interface with trackers. In particular, we have a xrt_tracked_slam field but not an equivalent for hand tracking.
Global rs2xrt_frame (struct rs_source *rs, rs2_frame *rframe, struct xrt_frame **out_xframe)
Use a stereo xrt_format
Global rs_hdev_correct_pose_from_basalt (struct xrt_pose pose)
Encode this transformation into constants
Global rs_hdev_correct_pose_from_kimera (struct xrt_pose pose)
Encode this transformation into constants
Global rs_source_configure_capture (struct xrt_fs *xfs, struct xrt_fs_capture_parameters *cp)
implement
Global rs_source_enumerate_modes (struct xrt_fs *xfs, struct xrt_fs_mode **out_modes, uint32_t *out_count)

only exposing the one stream configuration the user provided through the json configuration but we could show all possible stream setups.

The stereo_format being NONE is incorrect but one that supports frames in different memory regions does not exist yet.

Global rt8_unproject (const struct t_camera_model_params *hg_dist, const float u, const float v, float *out_x, float *out_y, float *out_z)
Decide if besides rpmax, it could be useful to have an rppmax field. A good starting point to having this would be using the sqrt of the max rpp2 value computed in the optimization of computeRpmax().
Global sdl_compositor::frame
Insert your own required members here
Global simulated_open_system_impl (struct xrt_builder *xb, cJSON *config, struct xrt_prober *xp, struct xrt_tracking_origin *origin, struct xrt_system_devices *xsysd, struct xrt_frame_context *xfctx, struct u_builder_roles_helper *ubrh)

Create a shared tracking origin on the system devices struct instead.

Make these be a option to the hmd create function, or just make it be there from the start.

Global survive_controller_get_hand_tracking (struct xrt_device *xdev, enum xrt_input_name name, uint64_t at_timestamp_ns, struct xrt_hand_joint_set *out_value, uint64_t *out_timestamp_ns)
place thumb precisely on the button that is touched/pressed
Global svr_hmd_create (struct svr_two_displays_distortion *distortion)
these should be true for the final product iirc but possibly not for the demo unit
Global svr_hmd_get_view_poses (struct xrt_device *xdev, const struct xrt_vec3 *default_eye_relation, uint64_t at_timestamp_ns, uint32_t view_count, struct xrt_space_relation *out_head_relation, struct xrt_fov *out_fovs, struct xrt_pose *out_poses)

: default_eye_relation inherits from the env var OXR_DEBUG_IPD_MM / oxr_session.c

you may need to invert this - I can't test locally

Global svr_mesh_calc (struct xrt_device *xdev, uint32_t view, float u, float v, struct xrt_uv_triplet *result)
: remove hard-coding and move to u_distortion_mesh
Global system_compositor_set_state (struct xrt_system_compositor *xsc, struct xrt_compositor *xc, bool visible, bool focused)
Locking?
Global system_compositor_set_z_order (struct xrt_system_compositor *xsc, struct xrt_compositor *xc, int64_t z_order)
Locking?
Global t_camera_distortion_model

Add RiftS's Fisheye62 to this enumerator once we have native support for it in our hand tracking and SLAM.

Feel free to add support for T_DISTORTION_OPENCV_RADTAN_4 or T_DISTORTION_OPENCV_RADTAN_12 whenever you have a camera that uses those.

Global t_camera_extra_info::views [2]
Hardcoded to 2 - needs to increase as we support headsets with more cameras.
Global T_DISTORTION_OPENCV_RADTAN_14
Feel free to implement RT14 (un)projection functions if you have a camera that actually has a tilted sensor.
Global t_hand_tracking_async_default_create (struct xrt_frame_context *xfctx, struct t_hand_tracking_sync *sync)
We came up with this value just by seeing what worked. With Index and WMR, we'd be around 40ms late by the time the camera frames arrived and were processed.
Global t_slam_get_tracked_pose (struct xrt_tracked_slam *xts, timepoint_ns when_ns, struct xrt_space_relation *out_relation)
This should not be cached, the same timestamp can be requested at a later time on the frame for a better prediction.
Global t_slam_receive_imu (struct xrt_imu_sink *sink, struct xrt_imu_sample *s)
There are many conversions like these between xrt and slam_tracker.hpp types. Implement a casting mechanism to avoid copies.
Global t_stereo_camera_calibration_from_json_v2
At some point it'll make sense to support different distortion models per-camera, but right now we don't have any cameras like that and the way t_stereo_camera_calib_alloc and (Stereo)CameraCalibrationWrapper work makes it pretty annoying.
Global teardown_devices (struct prober *p)

Free somewhere else

Free somewhere else

Global timepoint_ns
This is from u_time, duplicated to avoid layer violation.
Global u_device_assign_xdev_roles (struct xrt_device **xdevs, size_t xdev_count, int *head, int *left, int *right)
: do something with unassigned devices?
Global u_process_create_if_not_running (void)

If built without libbsd support, a placeholder value is returned that needs to be handled by the caller.

alternative implementation

Class u_sink_stereo_sbs_to_slam_sbs
Extend this to over-and-under frames!
Global u_var_draggable_u16::val
Unify "draggable" widgets interface.
Global U_WAIT_MEASURED_SCHEDULER_LATENCY_NS

Measure on Windows.

Global u_worker_group_push (struct u_worker_group *uwg, u_worker_group_func_t f, void *data)
Don't wait all, wait one.
Global update_session_state_locked (struct multi_system_compositor *msc)
Make this not be hardcoded.
Global v4l2_fs_configure_capture (struct xrt_fs *xfs, struct xrt_fs_capture_parameters *cp)
Global v4l2_fs_mainloop (void *ptr)
Sequence number and timestamp.
Global valve_index_setup_visual_trackers (struct lighthouse_system *lhs, struct xrt_device *head, struct vive_device *vive_head, struct xrt_frame_context *xfctx, struct xrt_prober *xp, struct xrt_slam_sinks *out_sinks, struct xrt_device **out_devices)
Using a single slot queue is wrong for SLAM
Global verify_projection_layer (struct oxr_session *sess, struct xrt_compositor *xc, struct oxr_logger *log, uint32_t layer_index, XrCompositionLayerProjection *proj, struct xrt_device *head, uint64_t timestamp)
More validation?
Global vf_fs_configure_capture (struct xrt_fs *xfs, struct xrt_fs_capture_parameters *cp)
Global vf_fs_frame (struct vf_fs *vid, GstSample *sample)
Proper sequence number and timestamp.
Global vive_controller_decode_message (struct vive_controller_device *d, struct vive_controller_message *message)
: Check if Vive controller on watchman2 is correctly handled with watchman2 codepath
Global vive_controller_decode_watchmanv2 (struct vive_controller_device *d, struct vive_controller_message *message)
: Parse lighthouse v2 data
Global vive_controller_get_hand_tracking (struct xrt_device *xdev, enum xrt_input_name name, uint64_t requested_timestamp_ns, struct xrt_hand_joint_set *out_value, uint64_t *out_timestamp_ns)
place thumb precisely on the button that is touched/pressed
Global vive_device_correct_pose_from_basalt (struct xrt_pose pose)
Test and fix for other headsets (vive/vivepro)
Global vive_get_slam_cams_calib (const struct vive_config *d, struct t_slam_camera_calibration *out_calib0, struct t_slam_camera_calibration *out_calib1)
: Index factory calibration is weird and doesn't seem to have the proper extrinsics. Let's overwrite them with some extrinsics I got from doing a calibration on my own headset. These seem to work better than native values. (mateosss)
Class vive_tracking_status
Creation flow is a bit broken for now, in the future this info should be closer to the tracker creation code, thus avoiding the need to pass it around like this.
Global wmr_cam_usb_thread (void *ptr)
Think this is not needed? What condition are we waiting for?
Global wmr_hmd::slam
Right now, we are not consistent in how we interface with trackers. In particular, we have a xrt_tracked_slam field but not an equivalent for hand tracking.
Global wmr_hmd_create (enum wmr_headset_type hmd_type, struct os_hid_device *hid_holo, struct os_hid_device *hid_ctrl, struct xrt_prober_device *dev_holo, enum u_logging_level log_level, struct xrt_device **out_hmd, struct xrt_device **out_handtracker, struct xrt_device **out_left_controller, struct xrt_device **out_right_controller)
Could reach this due to !XRT_HAVE_LIBUSB but the HMD should keep working
Global wmr_hmd_deactivate_odyssey_plus (struct wmr_hmd *wh)
Power down IMU, and maybe more.
Global wmr_hmd_deactivate_reverb (struct wmr_hmd *wh)
Power down IMU, and maybe more.
Class wmr_source

Currently only properly handling tracking cameras, move IMU and other sources here

Global wmr_source_push_imu_packet (struct xrt_fs *xfs, timepoint_ns t, struct xrt_vec3 accel, struct xrt_vec3 gyro)

IMU data should be generated from within the data source, but right now we need this function because it is being generated from wmr_hmd

Should this method receive raw or calibrated samples? Currently receiving raw because Basalt can calibrate them, but other systems can't.

Global XR_USE_TIMESPEC
Move these to the build system instead.
Global xrt::auxiliary::d3d::d3d12::createCommandLists (ID3D12Device &device, ID3D12CommandAllocator &command_allocator, ID3D12Resource &resource, enum xrt_swapchain_usage_bits bits, wil::com_ptr< ID3D12CommandList > out_acquire_command_list, wil::com_ptr< ID3D12CommandList > out_release_command_list)

do we need to set queue access somehow?

No idea if this is right, might depend on whether it's the compute or graphics compositor!

Global xrt::auxiliary::math::detail::LowPassIIR< Value, Scalar >::addSample (Value const &sample, timepoint_ns timestamp_ns, Scalar weight=1)
limit max dt?
Global xrt::auxiliary::tracking::AbsolutePositionLeverArmMeasurement::AbsolutePositionLeverArmMeasurement (MeasurementVector const &measurement, MeasurementVector const &knownLocationInBodySpace, MeasurementVector const &variance)
the point we get from the camera isn't the center of the ball, but the center of the visible surface of the ball - a closer approximation would be translation along the vector to the center of projection....
Global xrt::auxiliary::tracking::calibration_get_undistort_map (t_camera_calibration &calib, cv::InputArray rectify_transform_optional=cv::noArray(), cv::Mat new_camera_matrix_optional=cv::Mat())
Scale our intrinsics if the frame size we request differs from the calibrated size.
Global xrt::auxiliary::tracking::psmv::do_view (TrackerPSMV &t, View &view, cv::Mat &grey, cv::Mat &rgb)
Re-enable masks.
Global xrt::auxiliary::tracking::psmv::process (TrackerPSMV &t, struct xrt_frame *xf)

tune cutoff for residual arbitrarily "too large"

do we need to avoid claiming the same counterpart several times?

don't really need the square root to be done here.

Global xrt::auxiliary::tracking::psvr::get_pose (TrackerPSVR &t, timepoint_ns when_ns, struct xrt_space_relation *out_relation)
assuming that orientation is actually currently tracked.
Global xrt::auxiliary::tracking::psvr::sample_line (cv::Mat &src, const cv::Point2i &start, const cv::Point2i &end, int *inside_length)
We are counting pixels rather than measuring length - Bresenham's may introduce some inaccuracy here.
Global xrt::auxiliary::tracking::slam::filter_pose (TrackerSlam &t, timepoint_ns when_ns, struct xrt_space_relation *out_relation)
Implement the quaternion averaging with a m_ff_vec4_f32 and normalization. Although it would be best to have a way of generalizing types before so as to not have redundant copies of ff logic.
Global xrt::auxiliary::tracking::slam::predict_pose_from_imu (TrackerSlam &t, timepoint_ns when_ns, xrt_space_relation base_rel, timepoint_ns base_rel_ts, struct xrt_space_relation *out_relation)
Instead of using the same a and g values, do an interpolated sample like this:
Global xrt::auxiliary::tracking::slam::TrackerSlam::gravity_correction
Should be automatically computed instead of required to be filled manually through the UI.
Global xrt::auxiliary::tracking::slam::xr2gt_pose (const xrt_pose &gt_origin, const xrt_pose &xr_pose)
Right now this is hardcoded for Basalt and the EuRoC vicon datasets groundtruth and ignores orientation. Applies a fixed transformation so that the tracked and groundtruth trajectories origins and general motion match. The usual way of evaluating trajectory errors in SLAM requires to first align the trajectories through a non-linear optimization (e.g. gauss newton) so that they are as similar as possible. For this you need the entire tracked trajectory to be known beforehand, which makes it not suitable for reporting an error metric in realtime. See this 2-page paper for more info on trajectory alignment: https://ylatif.github.io/movingsensors/cameraReady/paper07.pdf
Global xrt::tracking::hand::mercury::back_project_keypoint_output (struct HandTracking *hgt, int hand_idx, int view_idx)
We're trivially rewriting the stereographic projection for like the 2nd or 3rd time here. We should add an Eigen template for this instead.
Global xrt::tracking::hand::mercury::check_outside_view (struct HandTracking *hgt, struct t_camera_extra_info_one_view boundary, xrt_vec2 &keypoint)
Optimize: Most of this can be calculated once at startup
Global xrt::tracking::hand::mercury::getCalibration (struct HandTracking *hgt, t_stereo_camera_calibration &calibration)

Really? We can totally support cameras with varying resolutions.

what are these magic values? they're probably turning the OpenCV formalism into OpenXR, but especially what gives with negating orientation.x?

Global xrt::tracking::hand::mercury::handle_changed_image_size (HandTracking *hgt, xrt_size &new_one_view_size)
optimize: can't we just scale camera matrix/etc correctly?
Global xrt::tracking::hand::mercury::HandTracking::cCallbackProcess (struct t_hand_tracking_sync *ht_sync, struct xrt_frame *left_frame, struct xrt_frame *right_frame, struct xrt_hand_joint_set *out_left_hand, struct xrt_hand_joint_set *out_right_hand, uint64_t *out_timestamp_ns)

does this go here?

optimize: We can have one of these on each thread

Global xrt::tracking::hand::mercury::lm::calc_stability_curl_multiplier (const OptimizerFinger< HandScalar > &finger_last, HandScalar obs_curl)
Use the neural net's output variance somehow
Global xrt::tracking::hand::mercury::lm::diff_stereographic_reprojection_error (const Vec3< T > &model_joint_pos_rel_camera_, const vec2_5 &observed_ray_sg, const HandScalar confidence_xy, const HandScalar stereographic_radius, ResidualHelper< T > &helper)
This works well but we can get a way more "rooted in math" way of increasing repro error with low-confidence measurements than this.
Global xrt::tracking::hand::mercury::lm::eval_hand_set_rel_orientations (const OptimizerHand< T > &opt, Orientations54< T > &rel_orientations)
In this version of our tracking, this is always constant.
Global xrt::tracking::hand::mercury::lm::normalize_vector_inplace (Vec3< T > &vector)
Any good template ways to get epsilon for float, double, jet?
Global xrt::tracking::hand::mercury::lm::opt_run (KinematicHandLM &state, one_frame_input &observation, xrt_hand_joint_set &out_viz_hand)

We don't yet know what "good" termination conditions are.

We need to do a parameter sweep on initial_trust_region_radius.

Is there a zero-copy way of doing this?

Global xrt::tracking::hand::mercury::lm::OptimizerHandInit (OptimizerHand< T > &opt, Quat< T > &pre_rotation)
needed?
Global xrt::tracking::hand::mercury::make_projection_instructions (t_camera_model_params &dist, bool flip_after, float expand_val, float twist, Eigen::Array< float, 3, 21 > &joints, projection_instructions &out_instructions, hand21_2d &out_hand)

this is probably wrong, should probably be negated

optimize

Global xrt::tracking::hand::mercury::MIN_HAND_SIZE
These are not backed up by real anthropometry data; they are just guesstimates. Patches welcome!
Global xrt::tracking::hand::mercury::normalizeGrayscaleImage (cv::Mat &data_in, cv::Mat &data_out)
optimize
Class xrt::tracking::hand::mercury::one_frame_one_view
Ask Rylie if adding = {} only does something if we do one_frame_one_view bla = {}.
Global xrt::tracking::hand::mercury::refine_center_of_distribution (struct HandTracking *hgt, const float *data, int coarse_x, int coarse_y, int w, int h, float *out_refined_x, float *out_refined_y)
this is not good and has at least one edge case, make it more readable and link to a jupyter notebook
Global xrt::tracking::hand::mercury::run_keypoint_estimation (void *ptr)
when you change this to have +Z-forward
Global xrt::tracking::hand::mercury::StereographicDistort (projection_state &mi)
optimize
Global xrt::tracking::hand::mercury::stop_everything_if_hands_are_overlapping (struct HandTracking *hgt)

This looks like it sucks, but it doesn't given the current architecture.

I really want to try making a discrete optimizer that looks at recent info and decides whether to drop tracking for a hand, switch its handedness or switch to some forthcoming overlapping-hands model.

Global xrt_fs::slam_stream_start )(struct xrt_fs *xfs, struct xrt_slam_sinks *sinks)
Fix this incongruence. Maybe rename the interface to xrt_data_source.
Class xrt_imu_sample
Make xrt_tracked_psmv and xrt_tracked_psvr use this
Class xrt_imu_sink
Make xrt_tracked_psmv and xrt_tracked_psvr implement this
Class xrt_rect_f32
Unify xrt_rect and xrt_rect_f32 field names
Class xrt_tracked_psmv
How do we communicate ball colour change?
Global xrt_tracked_psmv::get_tracked_pose )(struct xrt_tracked_psmv *, enum xrt_input_name name, timepoint_ns when_ns, struct xrt_space_relation *out_relation)
Should we add an out_time argument as a way to signal minimum and maximum, and as such only do interpolation between different captured frames.
Class xrt_tracked_psvr
How do we communicate led lighting status?
Class xrt_tracking_sample
Replace with xrt_imu_sample
Global xrt_tracking_type
Are none, Colour, IR, and Magnetic the kinds of type we need to know about?