@oringa Any chance you have a solution I could look at? Trying to get this working but I'm not quite there… here is what I've got so far using the resources linked above. I'm quite new to this, so any insight/help is welcome and much appreciated!

I'm also doing this in the context of WebODM… I've modified the NodeODM Docker image to output the OpenSfM data so that I can access it at the --media-dir location for running this new action below.

[edit] code… it's pretty close, but the projection is still off a bit

```
def run_dataset(data: DataSet, filename, x, y, output):
    """Compute the geographic position of a 2D pixel in a source image.

    Casts a ray through pixel (x, y) of shot ``filename``, reads the
    depth from the raw depthmap, back-projects to a 3D point, and
    converts it to LLA / ECEF / topocentric / UTM coordinates.

    Args:
        data: OpenSfM DataSet containing a reconstruction.
        filename: shot (image) id inside the reconstruction.
        x: pixel column in the original image.
        y: pixel row in the original image.
        output: name of the undistorted data directory (e.g. 'undistorted').
    """
    # Load the reconstruction, shot, pose and camera.
    rec: Reconstruction = data.load_reconstruction()[0]
    shot: Shot = rec.shots[filename]
    pose: Pose = shot.pose
    camera: Camera = shot.camera

    # Viewing ray through the pixel, in the camera coordinate system.
    pt2d = camera.pixel_to_normalized_coordinates((x, y))
    bearing = camera.pixel_bearing(pt2d)
    print('\nBearing')
    print(bearing)

    print('\nCamera Dimensions')
    # BUG FIX: the original printed `camera.width` twice.
    print(f'{camera.width} x {camera.height}')

    # Load the undistorted dataset that holds the raw depthmaps.
    udata_path = os.path.join(data.data_path, output)
    udata = dataset.UndistortedDataSet(data, udata_path)
    dMap, plane, score, nghbr, nghbrs = udata.load_raw_depthmap(shot.id)

    # A 2D raster is indexed [row][col], i.e. its shape is (height, width).
    # BUG FIX: the original used dMap[0].size / dMap[1].size, which are
    # BOTH the row length (width) of a 2D array, so the Y scale was wrong.
    d_height, d_width = dMap.shape
    print('\nDepthmap Size')
    print(f'{d_width} x {d_height}')

    # The depthmap is the image scaled down uniformly; relate the largest
    # depthmap dimension to the largest image dimension to get the factor.
    m = max(camera.width, camera.height)
    print('\nMax of camera width/height')
    print(m)
    scale = max(d_width, d_height) / m
    print('\nScaling for X/Y')
    print(scale)

    # Pixel position in depthmap coordinates.
    dx = int(round(scale * float(x)))
    dy = int(round(scale * float(y)))
    print('\nDepth Coordinates')
    print(f'{dx} x {dy}')

    print('\nPoint Depth')
    # BUG FIX: index row-major as [dy][dx]; the original's [dx][dy] is
    # only accidentally correct for square depthmaps.
    depth = dMap[dy][dx]
    if depth == 0.0:
        # No depth at this pixel; estimate from nearby valid samples.
        depth = estimate_depth(dMap, dy, dx, 2)
    print(depth)

    # BUG FIX: raw depthmaps store the z-coordinate (depth along the
    # optical axis), not the distance along the ray, so normalize the
    # bearing to unit z before scaling.  The commented-out
    # `bearing /= bearing[2]` in the original was the right idea and is
    # the likely cause of the projection being "off a bit".
    pt3d = bearing / bearing[2] * depth
    print('\nPoint 3D (bearing * depth)')
    print(pt3d)

    # Sanity check: re-project the camera-space point back to pixels.
    # BUG FIX: `shot.project` expects a *world*-space point; for a
    # camera-space point use the camera model directly.
    reproj = camera.project(pt3d)
    pt2D_px = camera.normalized_to_pixel_coordinates(reproj)
    print('\nPixel Reverse Projection')
    print(pt2D_px)

    # Camera coordinates -> reconstruction (world) coordinates.
    pt3d_world = pose.inverse().transform(pt3d)
    print('\nPoint 3D World')
    print(pt3d_world)

    # World coordinates -> latitude / longitude / altitude.
    gps_world = rec.get_reference().to_lla(*pt3d_world)
    print('\nGPS World')
    print(gps_world)

    # Earth-centred, earth-fixed cartesian coordinates.
    ecef = ecef_from_lla(*gps_world)
    print('\nECEF')
    print(ecef)

    # Topocentric coordinates relative to the (0, 0, 0) reference.
    topo = topocentric_from_lla(gps_world[0], gps_world[1], gps_world[2], 0, 0, 0)
    print('\nTopocentric')
    print(topo)

    # UTM easting / northing.
    utmdata = utm.from_latlon(gps_world[0], gps_world[1])
    print('\nUTM')
    print(utmdata)

    # Final annotation: easting, northing, altitude.
    print('\nPoint Annotation')
    print(f'{utmdata[0]}, {utmdata[1]}, {gps_world[2]}')
def estimate_depth(depth_map, x, y, steps):
    """Estimate the depth at (x, y) by averaging nearby non-zero samples.

    Expands a square window around (x, y) one ring at a time, up to
    ``steps`` rings, blending each window's average into the running
    estimate (matching the original's progressive-blend intent).

    Args:
        depth_map: 2D indexable of depth values, where 0.0 means "no depth".
        x: row index of the pixel to estimate.
        y: column index of the pixel to estimate.
        steps: number of expanding window sizes to blend in.

    Returns:
        The estimated depth, or 0.0 if no non-zero sample was found.
    """
    depth = depth_map[x][y]
    print('point depth')
    print(depth)
    # BUG FIX: the original guarded the loop with `if depth != 0.0`,
    # which skipped estimation exactly when it was needed — the caller
    # only invokes this function when the depth at (x, y) is 0.0, so it
    # always returned 0.0.
    for step in range(1, steps + 1):
        total = 0.0
        count = 0
        for i in range(x - step, x + step + 1):
            # BUG FIX: clamp to the map bounds (the original could raise
            # IndexError or silently wrap via negative indices).
            if not 0 <= i < len(depth_map):
                continue
            for j in range(y - step, y + step + 1):
                if not 0 <= j < len(depth_map[i]):
                    continue
                value = depth_map[i][j]
                # BUG FIX: only average valid (non-zero) samples, and
                # count one divisor per sample — the original added an
                # extra divisor per row and counted zero samples, which
                # dragged the average toward 0.
                if value != 0.0:
                    total += value
                    count += 1
        if count:
            average = total / count
            depth = average if depth == 0.0 else (depth + average) / 2
            print('average depth')
            print(depth)
    return depth
```