void SuiteSparse::ScalarMatrixToBlockMatrix(const cholmod_sparse* A,
                                            const vector<int>& row_blocks,
                                            const vector<int>& col_blocks,
                                            vector<int>* block_rows,
                                            vector<int>* block_cols) {
  CHECK_NOTNULL(block_rows)->clear();
  CHECK_NOTNULL(block_cols)->clear();
  const int num_row_blocks = row_blocks.size();
  const int num_col_blocks = col_blocks.size();

  vector<int> row_block_starts(num_row_blocks);
  for (int i = 0, cursor = 0; i < num_row_blocks; ++i) {
    row_block_starts[i] = cursor;
    cursor += row_blocks[i];
  }

  // The reinterpret_cast is needed here because CHOLMOD stores arrays
  // as void*.
  const int* scalar_cols = reinterpret_cast<const int*>(A->p);
  const int* scalar_rows = reinterpret_cast<const int*>(A->i);

  // This loop extracts the block sparsity of the scalar sparse matrix
  // A. It does so by iterating over the columns, but only considering
  // the columns corresponding to the first element of each column
  // block. Within each column, the inner loop iterates over the rows,
  // and detects the presence of a row block by checking for the
  // presence of a non-zero entry corresponding to its first element.
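  //
  // For example, with row_blocks = [2, 2], col_blocks = [2, 2], and a
  // 4x4 matrix A whose non-zeros all lie in its (0, 0) and (1, 1) 2x2
  // blocks, row_block_starts = [0, 2] and the output is
  // block_rows = [0, 1], block_cols = [0, 1, 2], i.e. the compressed
  // column structure of the 2x2 block diagonal sparsity pattern.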
  block_cols->push_back(0);
  int c = 0;
  for (int col_block = 0; col_block < num_col_blocks; ++col_block) {
    int column_size = 0;
    for (int idx = scalar_cols[c]; idx < scalar_cols[c + 1]; ++idx) {
      vector<int>::const_iterator it = lower_bound(row_block_starts.begin(),
                                                   row_block_starts.end(),
                                                   scalar_rows[idx]);
      // Since we are using lower_bound, it will return the row id at
      // which the row block containing scalar_rows[idx] starts. These
      // two values are equal only when scalar_rows[idx] is the first
      // row of its block; every other row can be skipped, since the
      // first row alone is enough to detect the presence of the block.
      //
      // For all rows but the first in the last row block, lower_bound
      // will return row_block_starts.end(), but those rows can be
      // skipped just like the rows in other row blocks.
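      //
      // For example, with row_block_starts = [0, 2, 5], a scalar row
      // index of 2 matches the start of the second row block and is
      // recorded, while a scalar row index of 3 makes lower_bound
      // return an iterator pointing at 5, which differs from 3, so
      // that row is skipped.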
      if (it == row_block_starts.end() || *it != scalar_rows[idx]) {
        continue;
      }

      block_rows->push_back(it - row_block_starts.begin());
      ++column_size;
    }
    block_cols->push_back(block_cols->back() + column_size);
    c += col_blocks[col_block];
  }
}

void SuiteSparse::BlockOrderingToScalarOrdering(
    const vector<int>& blocks,
    const vector<int>& block_ordering,
    vector<int>* scalar_ordering) {
  CHECK_EQ(blocks.size(), block_ordering.size());
  const int num_blocks = blocks.size();

  // block_starts = [0, block1, block1 + block2, ...]
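  //
  // For example, with blocks = [2, 3] and block_ordering = [1, 0],
  // block_starts = [0, 2] and the resulting scalar_ordering is
  // [2, 3, 4, 0, 1]: the scalar indices of block 1 followed by those
  // of block 0.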
  vector<int> block_starts(num_blocks);
  for (int i = 0, cursor = 0; i < num_blocks; ++i) {
    block_starts[i] = cursor;
    cursor += blocks[i];
  }

  scalar_ordering->resize(block_starts.back() + blocks.back());
  int cursor = 0;
  for (int i = 0; i < num_blocks; ++i) {
    const int block_id = block_ordering[i];
    const int block_size = blocks[block_id];
    int block_position = block_starts[block_id];
    for (int j = 0; j < block_size; ++j) {
      (*scalar_ordering)[cursor++] = block_position++;
    }
  }
}

bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
  CHECK_NOTNULL(A);
  CHECK_NOTNULL(L);

void SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
                                                   int* ordering) {
  cholmod_amd(matrix, NULL, 0, ordering, &cc_);
}
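
// cholmod_camd computes a fill-reducing ordering subject to the
// constraints array, which assigns each column of the matrix to an
// ordering group: columns in group 0 appear first in the ordering,
// followed by columns in group 1, and so on. See the CHOLMOD/CAMD
// documentation for details.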
void SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering(
    cholmod_sparse* matrix,
    int* constraints,
    int* ordering) {
#ifndef CERES_NO_CAMD
  cholmod_camd(matrix, NULL, 0, constraints, ordering, &cc_);
#else
  LOG(FATAL) << "Congratulations, you have found a bug in Ceres. "
             << "Ceres Solver was compiled with SuiteSparse "
             << "version 4.1.0 or less. Calling this function "
             << "in that case is a bug. Please contact the "
             << "Ceres Solver developers.";
#endif
}

}  // namespace internal
}  // namespace ceres