Skip to content

Commit

Permalink
Merge pull request #236 from Simple-Robotics/topic/gar/devel
Browse files Browse the repository at this point in the history
topic/gar/devel
  • Loading branch information
ManifoldFR authored Nov 4, 2024
2 parents 44cabf0 + 4d2671f commit 42d281f
Show file tree
Hide file tree
Showing 30 changed files with 777 additions and 519 deletions.
8 changes: 8 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,20 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- `SolverProxDDP`: add temporary vectors for linesearch
- `SolverProxDDP`: remove exceptions from `computeMultipliers()`, return a bool flag
- `gar`: rework and move sparse matrix utilities to `gar/utils.hpp`
- `SolverProxDDP`: Rename `maxRefinementSteps_` and `refinementThreshold_` to snake-case
- `SolverProxDDP`: make `linesearch_` public
- Change uses of `ConstraintSetBase` template class to `ConstraintSetTpl` (following changes in proxsuite-nlp 0.9.0) ([#233](https://github.com/Simple-Robotics/aligator/pull/233))
- [gar] Throw an exception if trying to instantiate `ParallelRiccatiSolver` with num_threads smaller than 2.
- [API BREAKING] Rename friction cone for centroidal into CentroidalFrictionCone ([#234](https://github.com/Simple-Robotics/aligator/pull/234))
- Change the linear multibody friction cone to the true "ice cream" cone ([#238](https://github.com/Simple-Robotics/aligator/pull/238))
- [gar] Rework `RiccatiSolverDense` to not use inner struct `FactorData`
- Various changes to `gar` tests and `test_util`, add `LQRKnot::isApprox()`

### Removed

- Default constructor for `LQRProblemTpl`
- Removed header `gar/fwd.hpp` with forward-declarations

## [0.9.0] - 2024-10-11

Expand Down
27 changes: 6 additions & 21 deletions bindings/python/src/gar/expose-gar.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
#include "aligator/python/blk-matrix.hpp"
#include "aligator/gar/lqr-problem.hpp"
#include "aligator/gar/riccati-base.hpp"
#include "aligator/gar/utils.hpp"

#include "aligator/python/utils.hpp"
#include "aligator/python/visitors.hpp"
Expand All @@ -24,17 +23,6 @@ using context::VectorXs;

using knot_vec_t = lqr_t::KnotVector;

bp::dict lqr_sol_initialize_wrap(const lqr_t &problem) {
bp::dict out;
auto ss = lqrInitializeSolution(problem);
auto &[xs, us, vs, lbdas] = ss;
out["xs"] = xs;
out["us"] = us;
out["vs"] = vs;
out["lbdas"] = lbdas;
return out;
}

static void exposeBlockMatrices() {
BlkMatrixPythonVisitor<BlkMatrix<MatrixXs, 2, 2>>::expose("BlockMatrix22");
BlkMatrixPythonVisitor<BlkMatrix<VectorXs, 4, 1>>::expose("BlockVector4");
Expand All @@ -61,6 +49,8 @@ void exposeParallelSolver();
void exposeDenseSolver();
// fwd-declare exposeProxRiccati()
void exposeProxRiccati();
// fwd-declare exposeGarUtils()
void exposeGarUtils();

void exposeGAR() {

Expand Down Expand Up @@ -97,6 +87,9 @@ void exposeGAR() {
.def_readwrite("Gu", &knot_t::Gu)
.def_readwrite("gamma", &knot_t::gamma)
//
.def("isApprox", &knot_t::isApprox,
("self"_a, "prec"_a = std::numeric_limits<Scalar>::epsilon()))
//
.def(CopyableVisitor<knot_t>())
.def(PrintableVisitor<knot_t>());

Expand Down Expand Up @@ -126,15 +119,7 @@ void exposeGAR() {
.def("forward", &riccati_base_t::forward,
("self"_a, "xs", "us", "vs", "lbdas", "theta"_a = std::nullopt));

bp::def(
"lqrDenseMatrix",
+[](const lqr_t &problem, Scalar mudyn, Scalar mueq) {
auto mat_rhs = lqrDenseMatrix(problem, mudyn, mueq);
return bp::make_tuple(std::get<0>(mat_rhs), std::get<1>(mat_rhs));
},
("problem"_a, "mudyn", "mueq"));

bp::def("lqrInitializeSolution", lqr_sol_initialize_wrap, ("problem"_a));
exposeGarUtils();

#ifdef ALIGATOR_WITH_CHOLMOD
exposeCholmodSolver();
Expand Down
44 changes: 44 additions & 0 deletions bindings/python/src/gar/expose-utils.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
#include "aligator/python/fwd.hpp"
#include "aligator/gar/utils.hpp"

#include <eigenpy/std-array.hpp>

namespace aligator::python {
using namespace gar;

using context::Scalar;
using lqr_t = LQRProblemTpl<context::Scalar>;

/// @brief Wrap lqrInitializeSolution() for Python: pack the four trajectory
/// vectors it produces into a dict keyed "xs", "us", "vs", "lbdas".
bp::dict lqr_sol_initialize_wrap(const lqr_t &problem) {
  // Bind the tuple members directly; no extra copies versus naming the tuple.
  auto [xs_init, us_init, vs_init, lbdas_init] = lqrInitializeSolution(problem);
  bp::dict result;
  result["xs"] = xs_init;
  result["us"] = us_init;
  result["vs"] = vs_init;
  result["lbdas"] = lbdas_init;
  return result;
}

void exposeGarUtils() {

bp::def(
"lqrDenseMatrix",
+[](const lqr_t &problem, Scalar mudyn, Scalar mueq) {
auto mat_rhs = lqrDenseMatrix(problem, mudyn, mueq);
return bp::make_tuple(std::get<0>(mat_rhs), std::get<1>(mat_rhs));
},
("problem"_a, "mudyn", "mueq"));

bp::def("lqrCreateSparseMatrix", lqrCreateSparseMatrix<Scalar>,
("problem"_a, "mudyn", "mueq", "mat", "rhs", "update"),
"Create or update a sparse matrix from an LQRProblem.");

bp::def("lqrInitializeSolution", lqr_sol_initialize_wrap, ("problem"_a));

bp::def("lqrComputeKktError", lqrComputeKktError<Scalar>,
("problem"_a, "xs", "us", "vs", "lbdas", "mudyn", "mueq", "theta",
"verbose"_a = false),
"Compute the KKT residual of the LQR problem.");
}
} // namespace aligator::python
3 changes: 2 additions & 1 deletion examples/acrobot.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,10 +70,11 @@ class Args(ArgsBase):
)

tol = 1e-3
mu_init = 0.01
mu_init = 10.0
solver = aligator.SolverProxDDP(tol, mu_init=mu_init, verbose=aligator.VERBOSE)
solver.max_iters = 200
solver.rollout_type = aligator.ROLLOUT_LINEAR
solver.linear_solver_choice = aligator.LQ_SOLVER_STAGEDENSE
solver.setup(problem)


Expand Down
154 changes: 3 additions & 151 deletions gar/include/aligator/gar/cholmod-solver.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,131 +6,9 @@
#include <Eigen/CholmodSupport>

#include "lqr-problem.hpp"
#include "utils.hpp"
#include "aligator/context.hpp"

namespace aligator::gar {
namespace helpers {
/// @brief Helper to assign a dense matrix into a range of coefficients of a
/// sparse matrix.
template <bool Update, typename InType, typename OutScalar>
void sparseAssignDenseBlock(Eigen::Index i0, Eigen::Index j0,
const Eigen::DenseBase<InType> &input,
Eigen::SparseMatrix<OutScalar> &out) {
assert(i0 + input.rows() <= out.rows() && "Inconsistent rows");
assert(j0 + input.cols() <= out.cols() && "Inconsistent cols");
using Eigen::Index;
for (Index i = 0; i < input.rows(); i++) {
for (Index j = 0; j < input.cols(); j++) {
if constexpr (Update) {
out.coeffRef(i0 + i, j0 + j) = input(i, j);
} else {
out.insert(i0 + i, j0 + j) = input(i, j);
}
}
}
}
} // namespace helpers

template <bool Update, typename Scalar>
void lqrCreateSparseMatrix(const LQRProblemTpl<Scalar> &problem,
const Scalar mudyn, const Scalar mueq,
Eigen::SparseMatrix<Scalar> &mat,
Eigen::Matrix<Scalar, -1, 1> &rhs) {
using Eigen::Index;
const uint nrows = lqrNumRows(problem);
using knot_t = LQRKnotTpl<Scalar>;
const auto &knots = problem.stages;

if constexpr (!Update) {
mat.conservativeResize(nrows, nrows);
mat.setZero();
}

rhs.conservativeResize(nrows);
rhs.setZero();
const size_t N = size_t(problem.horizon());
uint idx = 0;
{
uint nc0 = problem.nc0();
rhs.head(nc0) = problem.g0;
helpers::sparseAssignDenseBlock<Update>(0, nc0, problem.G0, mat);
helpers::sparseAssignDenseBlock<Update>(nc0, 0, problem.G0.transpose(),
mat);
for (Index kk = 0; kk < nc0; kk++) {
if constexpr (Update) {
mat.coeffRef(kk, kk) = -mudyn;
} else {
mat.insert(kk, kk) = -mudyn;
}
}
idx += nc0;
}

for (size_t t = 0; t <= N; t++) {
const knot_t &model = knots[t];
const uint n = model.nx + model.nu + model.nc;
// get block for current variables
auto rhsblk = rhs.segment(idx, n);

rhsblk.head(model.nx) = model.q;
rhsblk.segment(model.nx, model.nu) = model.r;
rhsblk.tail(model.nc) = model.d;

// fill-in Q block
helpers::sparseAssignDenseBlock<Update>(idx, idx, model.Q, mat);
const Index i0 = idx + model.nx; // u
// S block
helpers::sparseAssignDenseBlock<Update>(i0, idx, model.S.transpose(), mat);
helpers::sparseAssignDenseBlock<Update>(idx, i0, model.S, mat);
// R block
helpers::sparseAssignDenseBlock<Update>(i0, i0, model.R, mat);

const Index i1 = i0 + model.nu; // v
// C block
helpers::sparseAssignDenseBlock<Update>(i1, idx, model.C, mat);
helpers::sparseAssignDenseBlock<Update>(idx, i1, model.C.transpose(), mat);
// D block
helpers::sparseAssignDenseBlock<Update>(i1, i0, model.D, mat);
helpers::sparseAssignDenseBlock<Update>(i0, i1, model.D.transpose(), mat);

const Index i2 = i1 + model.nc;
// dual block
for (Index kk = i1; kk < i2; kk++) {
if constexpr (Update) {
mat.coeffRef(kk, kk) = -mueq;
} else {
mat.insert(kk, kk) = -mueq;
}
}

if (t != N) {
rhs.segment(idx + n, model.nx2) = model.f;

// A
helpers::sparseAssignDenseBlock<Update>(i2, idx, model.A, mat);
helpers::sparseAssignDenseBlock<Update>(idx, i2, model.A.transpose(),
mat);
// B
helpers::sparseAssignDenseBlock<Update>(i2, i0, model.B, mat);
helpers::sparseAssignDenseBlock<Update>(i0, i2, model.B.transpose(), mat);
// E
const Index i3 = i2 + model.nx2;
helpers::sparseAssignDenseBlock<Update>(i2, i3, model.E, mat);
helpers::sparseAssignDenseBlock<Update>(i3, i2, model.E.transpose(), mat);

for (Index kk = i2; kk < i3; kk++) {
if constexpr (Update) {
mat.coeffRef(kk, kk) = -mudyn;
} else {
mat.insert(kk, kk) = -mudyn;
}
}

idx += n + model.nx2;
}
}
}

/// @brief A sparse solver for the linear-quadratic problem based on CHOLMOD.
template <typename _Scalar> class CholmodLqSolver {
Expand All @@ -143,26 +21,12 @@ template <typename _Scalar> class CholmodLqSolver {

explicit CholmodLqSolver(const Problem &problem, uint numRefinementSteps = 1);

bool backward(const Scalar mudyn, const Scalar mueq) {
// update the sparse linear problem
lqrCreateSparseMatrix<true>(*problem_, mudyn, mueq, kktMatrix, kktRhs);
cholmod.factorize(kktMatrix);
return cholmod.info() == Eigen::Success;
}
bool backward(const Scalar mudyn, const Scalar mueq);

bool forward(std::vector<VectorXs> &xs, std::vector<VectorXs> &us,
std::vector<VectorXs> &vs, std::vector<VectorXs> &lbdas) const {
kktSol = cholmod.solve(-kktRhs);
kktResidual = kktRhs;
for (uint i = 0; i < numRefinementSteps; i++) {
kktResidual.noalias() += kktMatrix * kktSol;
kktSol += cholmod.solve(-kktResidual);
}
lqrDenseSolutionToTraj(*problem_, kktSol, xs, us, vs, lbdas);
return true;
};
std::vector<VectorXs> &vs, std::vector<VectorXs> &lbdas) const;

Scalar computeSparseResidual() const {
inline Scalar computeSparseResidual() const {
kktResidual = kktRhs;
kktResidual.noalias() += kktMatrix * kktSol;
return math::infty_norm(kktResidual);
Expand All @@ -184,18 +48,6 @@ template <typename _Scalar> class CholmodLqSolver {
const Problem *problem_;
};

template <typename Scalar>
CholmodLqSolver<Scalar>::CholmodLqSolver(const Problem &problem,
uint numRefinementSteps)
: kktMatrix(), kktRhs(), cholmod(), numRefinementSteps(numRefinementSteps),
problem_(&problem) {
lqrCreateSparseMatrix<false>(problem, 1., 1., kktMatrix, kktRhs);
assert(kktMatrix.cols() == kktRhs.rows());
kktSol.resize(kktRhs.rows());
kktResidual.resize(kktRhs.rows());
cholmod.analyzePattern(kktMatrix);
}

#ifdef ALIGATOR_ENABLE_TEMPLATE_INSTANTIATION
extern template class CholmodLqSolver<context::Scalar>;
#endif
Expand Down
46 changes: 46 additions & 0 deletions gar/include/aligator/gar/cholmod-solver.hxx
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
/// @copyright Copyright (C) 2024 LAAS-CNRS, INRIA
/// @author Wilson Jallet
#pragma once

#ifdef ALIGATOR_WITH_CHOLMOD
#include "cholmod-solver.hpp"
#include "utils.hpp"

namespace aligator::gar {

/// @brief Construct the solver: allocate the KKT system for @p problem and
/// run CHOLMOD's symbolic analysis once, so later backward() calls only
/// refactorize numerically.
/// @param problem            LQ problem; a pointer to it is kept (must outlive
///                           this solver).
/// @param numRefinementSteps number of iterative-refinement passes in forward().
template <typename Scalar>
CholmodLqSolver<Scalar>::CholmodLqSolver(const Problem &problem,
                                         uint numRefinementSteps)
    : kktMatrix(), kktRhs(), cholmod(), numRefinementSteps(numRefinementSteps),
      problem_(&problem) {
  // Build the sparsity pattern (update=false inserts coefficients); the
  // placeholder mu values 1., 1. only matter for the numeric entries, which
  // backward() overwrites before factorizing.
  lqrCreateSparseMatrix(problem, 1., 1., kktMatrix, kktRhs, false);
  assert(kktMatrix.cols() == kktRhs.rows());
  kktSol.resize(kktRhs.rows());
  kktResidual.resize(kktRhs.rows());
  // Symbolic (pattern-only) analysis; numeric factorization happens in
  // backward().
  cholmod.analyzePattern(kktMatrix);
}

/// @brief Numeric phase: refresh the KKT coefficients for the given
/// regularization parameters and refactorize.
/// @param mudyn dual regularization for dynamics constraints.
/// @param mueq  dual regularization for equality (path) constraints.
/// @return true iff CHOLMOD reports a successful factorization.
template <typename Scalar>
bool CholmodLqSolver<Scalar>::backward(const Scalar mudyn, const Scalar mueq) {
  // update the sparse linear problem (update=true: pattern from the ctor is
  // reused, only coefficient values change)
  lqrCreateSparseMatrix(*problem_, mudyn, mueq, kktMatrix, kktRhs, true);
  cholmod.factorize(kktMatrix);
  return cholmod.info() == Eigen::Success;
}

/// @brief Solve for the trajectory using the factorization from backward():
/// one Cholesky solve of K x = -rhs followed by `numRefinementSteps` passes
/// of iterative refinement, then scatter the stacked solution into the
/// per-stage vectors.
/// @param[out] xs    state trajectory
/// @param[out] us    control trajectory
/// @param[out] vs    path-constraint multipliers
/// @param[out] lbdas dynamics (co-state) multipliers
/// @return always true; factorization failures are reported by backward().
template <typename Scalar>
bool CholmodLqSolver<Scalar>::forward(std::vector<VectorXs> &xs,
                                      std::vector<VectorXs> &us,
                                      std::vector<VectorXs> &vs,
                                      std::vector<VectorXs> &lbdas) const {
  kktSol = cholmod.solve(-kktRhs);
  kktResidual = kktRhs;
  for (uint i = 0; i < numRefinementSteps; i++) {
    // r = b + K x must be recomputed from scratch each pass; the previous
    // version accumulated K x across iterations, corrupting refinement for
    // numRefinementSteps > 1.
    kktResidual = kktRhs;
    kktResidual.noalias() += kktMatrix * kktSol;
    kktSol += cholmod.solve(-kktResidual);
  }
  lqrDenseSolutionToTraj(*problem_, kktSol, xs, us, vs, lbdas);
  return true;
}
} // namespace aligator::gar
#endif
Loading

0 comments on commit 42d281f

Please sign in to comment.