
Rework rank-detection tolerance in pseudoinverse

Branch: master
Julian Kent, 4 years ago (committed by Julian Kent)
parent commit 15e54ceda1
2 changed files:
  1. matrix/PseudoInverse.hxx (23 changed lines)
  2. test/pseudoInverse.cpp (18 changed lines)

matrix/PseudoInverse.hxx (23 changed lines)

@@ -12,32 +12,20 @@
  * Full rank Cholesky factorization of A
  */
 template<typename Type>
-void fullRankCholeskyTolerance(Type &tol)
-{
-    tol /= 10000000;
-}
+Type typeEpsilon();
 
 template<> inline
-void fullRankCholeskyTolerance<double>(double &tol)
+float typeEpsilon<float>()
 {
-    tol /= 1000000000000000000.0;
+    return FLT_EPSILON;
 }
 
 template<typename Type, size_t N>
 SquareMatrix<Type, N> fullRankCholesky(const SquareMatrix<Type, N> & A,
                                        size_t& rank)
 {
-    // Compute
-    // dA = np.diag(A)
-    // tol = np.min(dA[dA > 0]) * 1e-9
-    Vector<Type, N> d = A.diag();
-    Type tol = d.max();
-    for (size_t k = 0; k < N; k++) {
-        if ((d(k) > 0) && (d(k) < tol)) {
-            tol = d(k);
-        }
-    }
-    fullRankCholeskyTolerance<Type>(tol);
+    // Loses one ulp accuracy per row of diag, relative to largest magnitude
+    const Type tol = N * typeEpsilon<Type>() * A.diag().max();
 
     Matrix<Type, N, N> L;
@@ -59,7 +47,6 @@ SquareMatrix<Type, N> fullRankCholesky(const SquareMatrix<Type, N> & A,
                 L(i, r) = A(i, k) - LL;
             }
         }
-
         if (L(k, r) > tol) {
             L(k, r) = sqrt(L(k, r));
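Note that the hunks above declare the generic typeEpsilon() but only show a float specialization returning FLT_EPSILON; a double instantiation of fullRankCholesky would also need a matching specialization to link, presumably provided elsewhere in the library since it is not part of this diff. A minimal sketch of what such a companion specialization could look like, assuming the same pattern with DBL_EPSILON from <cfloat>:

    // Hypothetical companion specialization, not shown in this commit;
    // mirrors the float version using DBL_EPSILON from <cfloat>.
    template<> inline
    double typeEpsilon<double>()
    {
        return DBL_EPSILON;
    }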

test/pseudoInverse.cpp (18 changed lines)

@@ -127,28 +127,26 @@ int main()
     TEST((retM1 - retM_check).abs().max() < 1e-5);
     TEST((retN1 - retN_check).abs().max() < 1e-5);
 
-    float float_scale = 1.f;
-    fullRankCholeskyTolerance(float_scale);
-    double double_scale = 1.;
-    fullRankCholeskyTolerance(double_scale);
-    TEST(static_cast<double>(float_scale) > double_scale);
-
     // Real-world test case
-    const float real_alloc[5][6] = {{ 0.794079, 0.794079, 0.794079, 0.794079, 0.0000, 0.0000},
+    const float real_alloc[5][6] = {
+        { 0.794079,  0.794079,  0.794079,  0.794079,  0.0000,  0.0000},
         { 0.607814,  0.607814,  0.607814,  0.607814,  1.0000,  1.0000},
         {-0.672516,  0.915642, -0.915642,  0.672516,  0.0000,  0.0000},
         { 0.159704,  0.159704,  0.159704,  0.159704, -0.2500, -0.2500},
-        { 0.607814, -0.607814,  0.607814, -0.607814,  1.0000,  1.0000}};
+        { 0.607814, -0.607814,  0.607814, -0.607814,  1.0000,  1.0000}
+    };
     Matrix<float, 5, 6> real ( real_alloc);
     Matrix<float, 6, 5> real_pinv = geninv(real);
 
     // from SVD-based inverse
-    const float real_pinv_expected_alloc[6][5] = {{ 2.096205, -2.722267, 2.056547, 1.503279, 3.098087},
+    const float real_pinv_expected_alloc[6][5] = {
+        { 2.096205, -2.722267,  2.056547,  1.503279,  3.098087},
         { 1.612621, -1.992694,  2.056547,  1.131090,  2.275467},
         {-1.062688,  2.043479, -2.056547, -0.927950, -2.275467},
         {-1.546273,  2.773052, -2.056547, -1.300139, -3.098087},
         {-0.293930,  0.443445,  0.000000, -0.226222,  0.000000},
-        {-0.293930,  0.443445,  0.000000, -0.226222,  0.000000}};
+        {-0.293930,  0.443445,  0.000000, -0.226222,  0.000000}
+    };
     Matrix<float, 6, 5> real_pinv_expected(real_pinv_expected_alloc);
     TEST((real_pinv - real_pinv_expected).abs().max() < 1e-4);
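To see what the reworked tolerance does numerically, here is a small standalone sketch (not part of the commit; N, maxDiag, and minPosDiag are made-up values chosen for illustration) comparing the new epsilon-scaled cutoff against the old fixed-divisor scheme, which divided by 1e7 for float and 1e18 for double:

    // Standalone sketch of the rank-detection cutoff, old vs. new.
    #include <cfloat>
    #include <cstdio>

    int main()
    {
        const int N = 5;                // matrix dimension (hypothetical)
        const float maxDiag = 2.0f;     // largest entry of A.diag() (hypothetical)

        // New scheme: scales with machine epsilon, N, and the magnitude of A.
        const float tol_new = N * FLT_EPSILON * maxDiag;

        // Old scheme (removed by this commit): smallest positive diagonal
        // entry divided by a fixed per-type constant (1e7 for float).
        const float minPosDiag = 2.0f;  // hypothetical
        const float tol_old = minPosDiag / 10000000.f;

        // In fullRankCholesky, any pivot L(k, r) at or below tol is treated
        // as zero, so its column does not count toward the detected rank.
        std::printf("new tol = %g, old tol = %g\n",
                    static_cast<double>(tol_new), static_cast<double>(tol_old));
        return 0;
    }

The point of the change is that the cutoff now tracks the floating-point type's precision and the matrix's own scale instead of a per-type magic divisor, which is also why the float_scale/double_scale comparison test above became obsolete and was removed.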
