
python - dsac_tools(calculate essential matrix using pytorch) computational problem - Stack Overflow


I am trying to find a Python implementation that can calculate the essential matrix, so that I can integrate it into my machine learning model.

import numpy as np
import torch

def _homo(x):
    # input: x [N, 2] or [batch_size, N, 2]
    # output: x_homo [N, 3]  or [batch_size, N, 3]
    assert len(x.size()) in [2, 3]
    print(f"x: {x.size()[0]}, {x.size()[1]}, {x.dtype}, {x.device}")
    if len(x.size())==2:
        ones = torch.ones(x.size()[0], 1, dtype=x.dtype, device=x.device)
        x_homo = torch.cat((x, ones), 1)
    elif len(x.size())==3:
        ones = torch.ones(x.size()[0], x.size()[1], 1, dtype=x.dtype, device=x.device)
        x_homo = torch.cat((x, ones), 2)
    return x_homo

def _de_homo(x_homo):
    # input: x_homo [N, 3] or [batch_size, N, 3]
    # output: x [N, 2] or [batch_size, N, 2]
    assert len(x_homo.size()) in [2, 3]
    epi = 1e-10
    if len(x_homo.size())==2:
        x = x_homo[:, :-1]/((x_homo[:, -1]+epi).unsqueeze(-1))
    else:
        x = x_homo[:, :, :-1]/((x_homo[:, :, -1]+epi).unsqueeze(-1))
    return x
def _normalize_XY(X, Y):
    """ The Hartley normalization. Following .py#L157 
    corrected with 
    and  """
    if X.size()[0] != Y.size()[0]:
        raise ValueError("Number of points don't match.")
    X = _homo(X)
    mean_1 = torch.mean(X[:, :2], dim=0, keepdim=True)
    S1 = np.sqrt(2) / torch.mean(torch.norm(X[:, :2]-mean_1, 2, dim=1))
    # print(mean_1.size(), S1.size())
    T1 = torch.tensor([[S1,0,-S1*mean_1[0, 0]],[0,S1,-S1*mean_1[0, 1]],[0,0,1]], device=X.device)
    X_normalized = _de_homo(torch.mm(T1, X.t()).t()) # ideally zero mean (x, y), and sqrt(2) average norm

    # xxx = X_normalized.numpy()
    # print(np.mean(xxx, axis=0))
    # print(np.mean(np.linalg.norm(xxx, 2, axis=1)))

    Y = _homo(Y)
    mean_2 = torch.mean(Y[:, :2], dim=0, keepdim=True)
    S2 = np.sqrt(2) / torch.mean(torch.norm(Y[:, :2]-mean_2, 2, dim=1))
    T2 = torch.tensor([[S2,0,-S2*mean_2[0, 0]],[0,S2,-S2*mean_2[0, 1]],[0,0,1]], device=X.device)
    Y_normalized = _de_homo(torch.mm(T2, Y.t()).t())

    return X_normalized, Y_normalized, T1, T2
def _E_from_XY(X, Y, K, W=None, if_normzliedK=False, normalize=True, show_debug=False): # Ref: .py#L55
    """ Normalized Eight-Point Algorithm for E.
        [Manmohan] In practice, one would transform the data points by K^{-1}, then do a Hartley
        normalization, then estimate the F matrix (which is now the E matrix), then enforce the
        singular-value condition, then denormalize. Note that it is better to set the singular
        values first, then denormalize.
        X, Y: [N, 2] """
    if if_normzliedK:
        X_normalizedK = X
        Y_normalizedK = Y
    else:
        X_normalizedK = _de_homo(torch.mm(torch.inverse(K), _homo(X).t()).t())
        Y_normalizedK = _de_homo(torch.mm(torch.inverse(K), _homo(Y).t()).t())

    if normalize:
        X, Y, T1, T2 = _normalize_XY(X_normalizedK, Y_normalizedK)
    else:
        X, Y = X_normalizedK, Y_normalizedK
    # print(T1)
    # print(T2)
    # print(X)

    xx = torch.cat([X.t(), Y.t()], dim=0)
    XX = torch.stack([
        xx[2, :] * xx[0, :], xx[2, :] * xx[1, :], xx[2, :],
        xx[3, :] * xx[0, :], xx[3, :] * xx[1, :], xx[3, :],
        xx[0, :], xx[1, :], torch.ones_like(xx[0, :])
    ], dim=0).t() # [N, 9]
    # print(XX.size())
    if W is not None:
        XX = torch.mm(W, XX) # [N, 9]
    # print(XX[:2])
    U, D, V = torch.svd(XX, some=True)
    if show_debug:
        print('[info.Debug @_E_from_XY] Singular values of XX:\n', D.numpy())

    # U_np, D_np, V_np = np.linalg.svd(XX.numpy())

    F_recover = torch.reshape(V[:, -1], (3, 3))
    # print('-', F_recover)

    FU, FD, FV= torch.svd(F_recover, some=True)
    if show_debug:
        print('[info.Debug @_E_from_XY] Singular values for recovered E(F):\n', FD.numpy())

    # FDnew = torch.diag(FD);
    # FDnew[2, 2] = 0;
    # F_recover_sing = torch.mm(FU, torch.mm(FDnew, FV.t()))
    S_110 = torch.diag(torch.tensor([1., 1., 0.], dtype=FU.dtype, device=FU.device))
    E_recover_110 = torch.mm(FU, torch.mm(S_110, FV.t()))
    # F_recover_sing_rescale = F_recover_sing / torch.norm(F_recover_sing) * torch.norm(F)

    # print(E_recover_110)
    if normalize:
        E_recover_110 = torch.mm(T2.t(), torch.mm(E_recover_110, T1))
    return E_recover_110

The code above is from dsac_tools. However, this is what I get when I feed in identical X and Y:

K = torch.tensor(
        [[100, 0, 2],  # Focal length fx = 100, principal point cx = 2
        [0, 100, 2],  # Focal length fy = 100, principal point cy = 2
        [0, 0, 1]]    # Homogeneous coordinate
    , dtype=torch.float32)  # Shape: (3, 3)
X = torch.tensor([
    [0,0],
    [0,1],
    [0,2],
    [0,3],
    [1,0],
    [1,1],
    [1,2],
    [1,3],
        ], dtype=torch.float32)
Y = torch.tensor([
    [0,0],
    [0,1],
    [0,2],
    [0,3],
    [1,0],
    [1,1],
    [1,2],
    [1,3],
        ], dtype=torch.float32)
E = _E_from_XY(X, Y, K)
print(E)

output:

tensor([[-1.5882e+03, -1.3286e+04, -1.4975e+02],
        [ 1.3286e+04, -1.6954e+03,  1.8782e+02],
        [ 1.0210e+02, -2.1076e+02, -4.2263e-01]])

instead of zeros (since the flow is zero, I assume the essential matrix should also be a 3 × 3 matrix of zeros). I also tried other point combinations, but the results don't seem right either.
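To judge whether an estimate is plausible, I check the epipolar constraint on the K-normalized points with the helper below (my own snippet, not part of dsac_tools). As far as I can tell, the returned E is expressed in K-normalized coordinates, because the final step only undoes the Hartley transform, and the constraint the code sets up is y^T E x with Y on the left:

def _epipolar_residuals(E, X, Y, K):
    # My own check: y_n^T E x_n for every correspondence, where x_n, y_n are the
    # K-normalized points (matching the row layout of XX inside _E_from_XY).
    K_inv = torch.inverse(K)
    x_n = torch.mm(K_inv, _homo(X).t()).t()  # [N, 3]
    y_n = torch.mm(K_inv, _homo(Y).t()).t()  # [N, 3]
    return torch.einsum('ni,ij,nj->n', y_n, E, x_n)

print(_epipolar_residuals(E, X, Y, K))  # should be close to zero for a valid E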

I tried replacing

def _de_homo(x_homo):
    # input: x_homo [N, 3] or [batch_size, N, 3]
    # output: x [N, 2] or [batch_size, N, 2]
    assert len(x_homo.size()) in [2, 3]
    epi = 1e-10
    if len(x_homo.size())==2:
        x = x_homo[:, :-1]/((x_homo[:, -1]+epi).unsqueeze(-1))
    else:
        x = x_homo[:, :, :-1]/((x_homo[:, :, -1]+epi).unsqueeze(-1))
    return x

with

def _de_homo(x_homo):
    # Avoid division by zero with a small epsilon and clamping
    denom = torch.clamp(x_homo[..., -1:], min=1e-100)
    return x_homo[..., :-1] / denom

to make sure the problem wasn't caused by the epsilon.
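I also convinced myself that the epsilon cannot matter here: _homo() appends an exact 1 as the last coordinate, and since the bottom rows of K^{-1} and the Hartley transforms T are [0, 0, 1], the denominator in _de_homo is always 1 (plus the epsilon). A minimal check (my own snippet):

x = torch.tensor([[0., 0.], [1., 3.]])
x_h = _homo(x)
print(x_h[:, -1])     # tensor([1., 1.]) -- the last coordinate is exactly 1
print(_de_homo(x_h))  # recovers x up to the 1e-10 epsilon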

I also tried to directly calculate XX from X and Y:

XX = torch.stack([
    X[:, 0] * Y[:, 0],  # x1 * x2
    X[:, 0] * Y[:, 1],  # x1 * y2
    X[:, 0],            # x1
    X[:, 1] * Y[:, 0],  # y1 * x2
    X[:, 1] * Y[:, 1],  # y1 * y2
    X[:, 1],            # y1
    Y[:, 0],            # x2
    Y[:, 1],            # y2
    torch.ones_like(X[:, 0])  # 1
], dim=1)
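If I build both from the same X and Y, this hand-built XX is just a column permutation of the XX that _E_from_XY builds internally, and the permutation corresponds to transposing the reshaped 3 × 3 matrix, so its null vector would reshape to the transpose of the other one. A quick structural check (my own snippet):

xx = torch.cat([X.t(), Y.t()], dim=0)
XX_dsac = torch.stack([
    xx[2, :] * xx[0, :], xx[2, :] * xx[1, :], xx[2, :],
    xx[3, :] * xx[0, :], xx[3, :] * xx[1, :], xx[3, :],
    xx[0, :], xx[1, :], torch.ones_like(xx[0, :])
], dim=0).t()
perm = [0, 3, 6, 1, 4, 7, 2, 5, 8]  # transpose of the row-major 3x3 flattening
print(torch.allclose(XX, XX_dsac[:, perm]))  # expected: True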

Is there a problem with the calculation itself, or is it caused by my input format?
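For reference, the classical non-differentiable baseline I would compare against is OpenCV's findEssentialMat; the snippet below is only a sketch (it assumes cv2 and numpy are installed, and the correspondences are made up for illustration):

import cv2
import numpy as np

K_np = np.array([[100., 0., 2.],
                 [0., 100., 2.],
                 [0., 0., 1.]])
pts1 = np.array([[10., 10.], [20., 15.], [30., 40.], [45., 25.], [60., 70.], [12., 55.]])
pts2 = pts1 + np.array([3., 1.])  # hypothetical matches under a small image-space shift
E_cv, mask = cv2.findEssentialMat(pts1, pts2, K_np, method=cv2.RANSAC, prob=0.999, threshold=1.0)
print(E_cv)  # may contain one (or several stacked) 3x3 candidate matrices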
