[mlir][sparse] Renaming the STEA field dimLevelType to lvlTypes
This commit is part of the migration towards the new STEA syntax/design.  In particular, it includes the following changes:
* Renaming compiler-internal functions/methods:
  * `SparseTensorEncodingAttr::{getDimLevelType => getLvlTypes}`
  * `Merger::{getDimLevelType => getLvlType}` (for consistency)
  * `sparse_tensor::{getDimLevelType => buildLevelType}` (to help reduce confusion with actual getter methods)
* Renaming external facets to match:
  * the STEA parser and printer
  * the C and Python bindings
  * PyTACO

However, the actual renaming of the `DimLevelType` itself (along with all the "dlt" names) will be handled in a separate commit.
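For illustration, a minimal sketch of what a call site looks like after the rename (hypothetical caller code, not part of this commit; it assumes the post-commit MLIR headers are on the include path):

// Hypothetical caller, for illustration only (not part of this commit).
#include "mlir/Dialect/SparseTensor/IR/Enums.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include <cassert>

using namespace mlir::sparse_tensor;

static void inspectEncoding(SparseTensorEncodingAttr enc) {
  // Formerly enc.getDimLevelType(); returns the per-level types.
  for (DimLevelType dlt : enc.getLvlTypes())
    (void)isDenseDLT(dlt);
  // Formerly sparse_tensor::getDimLevelType(...); builds a level-type
  // from a format plus the ordered/unique properties.
  const std::optional<DimLevelType> dlt =
      buildLevelType(LevelFormat::Compressed, /*ordered=*/true,
                     /*unique=*/true);
  assert(dlt == DimLevelType::Compressed);
}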

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D150330
wrengr committed May 17, 2023
1 parent 4dc205f commit a0615d0
Showing 172 changed files with 1,229 additions and 1,240 deletions.
4 changes: 2 additions & 2 deletions mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -52,7 +52,7 @@ mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr);
 /// Creates a `sparse_tensor.encoding` attribute with the given parameters.
 MLIR_CAPI_EXPORTED MlirAttribute mlirSparseTensorEncodingAttrGet(
     MlirContext ctx, intptr_t lvlRank,
-    enum MlirSparseTensorDimLevelType const *dimLevelTypes,
+    enum MlirSparseTensorDimLevelType const *lvlTypes,
     MlirAffineMap dimOrdering, MlirAffineMap higherOrdering, int posWidth,
     int crdWidth);
@@ -62,7 +62,7 @@ mlirSparseTensorEncodingGetLvlRank(MlirAttribute attr);

 /// Returns a specified level-type of the `sparse_tensor.encoding` attribute.
 MLIR_CAPI_EXPORTED enum MlirSparseTensorDimLevelType
-mlirSparseTensorEncodingAttrGetDimLevelType(MlirAttribute attr, intptr_t lvl);
+mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl);

 /// Returns the dimension-ordering of the `sparse_tensor.encoding` attribute.
 MLIR_CAPI_EXPORTED MlirAffineMap
28 changes: 14 additions & 14 deletions mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -300,7 +300,7 @@ constexpr std::optional<LevelFormat> getLevelFormat(DimLevelType dlt) {
 /// TODO: factor out a new LevelProperties type so we can add new properties
 /// without changing this function's signature
 constexpr std::optional<DimLevelType>
-getDimLevelType(LevelFormat lf, bool ordered, bool unique) {
+buildLevelType(LevelFormat lf, bool ordered, bool unique) {
   auto dlt = static_cast<DimLevelType>(static_cast<uint8_t>(lf) |
                                        (ordered ? 0 : 2) | (unique ? 0 : 1));
   return isValidDLT(dlt) ? std::optional(dlt) : std::nullopt;
@@ -321,27 +321,27 @@ static_assert(
     "getLevelFormat conversion is broken");

 static_assert(
-    (getDimLevelType(LevelFormat::Dense, false, true) == std::nullopt &&
-     getDimLevelType(LevelFormat::Dense, true, false) == std::nullopt &&
-     getDimLevelType(LevelFormat::Dense, false, false) == std::nullopt &&
-     *getDimLevelType(LevelFormat::Dense, true, true) == DimLevelType::Dense &&
-     *getDimLevelType(LevelFormat::Compressed, true, true) ==
+    (buildLevelType(LevelFormat::Dense, false, true) == std::nullopt &&
+     buildLevelType(LevelFormat::Dense, true, false) == std::nullopt &&
+     buildLevelType(LevelFormat::Dense, false, false) == std::nullopt &&
+     *buildLevelType(LevelFormat::Dense, true, true) == DimLevelType::Dense &&
+     *buildLevelType(LevelFormat::Compressed, true, true) ==
         DimLevelType::Compressed &&
-     *getDimLevelType(LevelFormat::Compressed, true, false) ==
+     *buildLevelType(LevelFormat::Compressed, true, false) ==
         DimLevelType::CompressedNu &&
-     *getDimLevelType(LevelFormat::Compressed, false, true) ==
+     *buildLevelType(LevelFormat::Compressed, false, true) ==
        DimLevelType::CompressedNo &&
-     *getDimLevelType(LevelFormat::Compressed, false, false) ==
+     *buildLevelType(LevelFormat::Compressed, false, false) ==
        DimLevelType::CompressedNuNo &&
-     *getDimLevelType(LevelFormat::Singleton, true, true) ==
+     *buildLevelType(LevelFormat::Singleton, true, true) ==
        DimLevelType::Singleton &&
-     *getDimLevelType(LevelFormat::Singleton, true, false) ==
+     *buildLevelType(LevelFormat::Singleton, true, false) ==
        DimLevelType::SingletonNu &&
-     *getDimLevelType(LevelFormat::Singleton, false, true) ==
+     *buildLevelType(LevelFormat::Singleton, false, true) ==
        DimLevelType::SingletonNo &&
-     *getDimLevelType(LevelFormat::Singleton, false, false) ==
+     *buildLevelType(LevelFormat::Singleton, false, false) ==
        DimLevelType::SingletonNuNo),
-    "getDimLevelType conversion is broken");
+    "buildLevelType conversion is broken");

 // Ensure the above predicates work as intended.
 static_assert((isValidDLT(DimLevelType::Undef) &&
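Aside: the renamed builder in the hunk above composes a DimLevelType by OR-ing two property bits onto the LevelFormat (2 = unordered, 1 = non-unique); clear bits mean ordered/unique. A minimal standalone check of that reading, mirroring the static_asserts in the diff (assumes the post-commit Enums.h is on the include path):

// Sketch mirroring the static_asserts above; illustrative, not part of
// this commit.
#include "mlir/Dialect/SparseTensor/IR/Enums.h"

using namespace mlir::sparse_tensor;

// With both properties "on" (ordered, unique), no bits are set and the
// plain format comes back:
static_assert(*buildLevelType(LevelFormat::Singleton, true, true) ==
                  DimLevelType::Singleton,
              "no property bits set");
// Dropping uniqueness sets bit 1 and yields the "Nu" variant:
static_assert(*buildLevelType(LevelFormat::Singleton, true, false) ==
                  DimLevelType::SingletonNu,
              "non-unique bit set");
// Combinations rejected by isValidDLT (e.g. any non-default Dense
// variant) fold to std::nullopt:
static_assert(buildLevelType(LevelFormat::Dense, false, false) == std::nullopt,
              "invalid level-types are rejected");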
@@ -266,7 +266,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     ArrayRefParameter<
       "::mlir::sparse_tensor::DimLevelType",
       "level-types"
-    >: $dimLevelType,
+    >: $lvlTypes,
     // A permutation from (higher-ordering)-coordinates to level-coordinates.
     "AffineMap":$dimOrdering,
     // A mapping from dimension-coordinates to (higher-ordering)-coordinates.
@@ -283,12 +283,12 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
   );

   let builders = [
-    AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::DimLevelType>":$dimLevelType,
+    AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::DimLevelType>":$lvlTypes,
                      "AffineMap":$dimOrdering,
                      "AffineMap":$higherOrdering,
                      "unsigned":$posWidth,
                      "unsigned":$crdWidth), [{
-      return $_get($_ctxt, dimLevelType,
+      return $_get($_ctxt, lvlTypes,
                    dimOrdering,
                    higherOrdering,
                    posWidth,
8 changes: 4 additions & 4 deletions mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
@@ -377,14 +377,14 @@ class Merger {
   bool hasSparseIdxReduction(const BitVector &bits) const;

   /// Gets the level-type of the `t`th tensor on `i`th loop.
-  DimLevelType getDimLevelType(TensorId t, LoopId i) const {
+  DimLevelType getLvlType(TensorId t, LoopId i) const {
     assert(isValidTensorId(t) && isValidLoopId(i));
     return lvlTypes[t][i];
   }

   /// Gets the level-type of the TensorLoopId.
-  DimLevelType getDimLevelType(TensorLoopId b) const {
-    return getDimLevelType(tensor(b), loop(b));
+  DimLevelType getLvlType(TensorLoopId b) const {
+    return getLvlType(tensor(b), loop(b));
   }

   /// Gets the loop identifier for the `lvl`th level of the `t`th tensor.
@@ -434,7 +434,7 @@ class Merger {
     for (const TensorLoopId b : bits.set_bits()) {
       const TensorId t = tensor(b);
       const auto optLvl = getLvl(b);
-      const auto lvlTp = getDimLevelType(b);
+      const auto lvlTp = getLvlType(b);
       if (isLvlWithNonTrivialIdxExp(b)) {
         // This must be an undefined level.
         assert(!optLvl.has_value());
12 changes: 5 additions & 7 deletions mlir/lib/Bindings/Python/DialectSparseTensor.cpp
@@ -39,30 +39,28 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
                       mlirAttributeIsASparseTensorEncodingAttr)
       .def_classmethod(
           "get",
-          [](py::object cls,
-             std::vector<MlirSparseTensorDimLevelType> dimLevelTypes,
+          [](py::object cls, std::vector<MlirSparseTensorDimLevelType> lvlTypes,
              std::optional<MlirAffineMap> dimOrdering,
              std::optional<MlirAffineMap> higherOrdering, int posWidth,
              int crdWidth, MlirContext context) {
             return cls(mlirSparseTensorEncodingAttrGet(
-                context, dimLevelTypes.size(), dimLevelTypes.data(),
+                context, lvlTypes.size(), lvlTypes.data(),
                 dimOrdering ? *dimOrdering : MlirAffineMap{nullptr},
                 higherOrdering ? *higherOrdering : MlirAffineMap{nullptr},
                 posWidth, crdWidth));
           },
-          py::arg("cls"), py::arg("dim_level_types"), py::arg("dim_ordering"),
+          py::arg("cls"), py::arg("lvl_types"), py::arg("dim_ordering"),
           py::arg("higher_ordering"), py::arg("pos_width"),
           py::arg("crd_width"), py::arg("context") = py::none(),
           "Gets a sparse_tensor.encoding from parameters.")
       .def_property_readonly(
-          "dim_level_types",
+          "lvl_types",
          [](MlirAttribute self) {
            const int lvlRank = mlirSparseTensorEncodingGetLvlRank(self);
            std::vector<MlirSparseTensorDimLevelType> ret;
            ret.reserve(lvlRank);
            for (int l = 0; l < lvlRank; ++l)
-              ret.push_back(
-                  mlirSparseTensorEncodingAttrGetDimLevelType(self, l));
+              ret.push_back(mlirSparseTensorEncodingAttrGetLvlType(self, l));
            return ret;
          })
      .def_property_readonly(
17 changes: 8 additions & 9 deletions mlir/lib/CAPI/Dialect/SparseTensor.cpp
@@ -47,16 +47,15 @@ bool mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr) {

 MlirAttribute mlirSparseTensorEncodingAttrGet(
     MlirContext ctx, intptr_t lvlRank,
-    MlirSparseTensorDimLevelType const *dimLevelTypes,
-    MlirAffineMap dimOrdering, MlirAffineMap higherOrdering, int posWidth,
-    int crdWidth) {
-  SmallVector<DimLevelType> cppDimLevelTypes;
-  cppDimLevelTypes.reserve(lvlRank);
+    MlirSparseTensorDimLevelType const *lvlTypes, MlirAffineMap dimOrdering,
+    MlirAffineMap higherOrdering, int posWidth, int crdWidth) {
+  SmallVector<DimLevelType> cppLvlTypes;
+  cppLvlTypes.reserve(lvlRank);
   for (intptr_t l = 0; l < lvlRank; ++l)
-    cppDimLevelTypes.push_back(static_cast<DimLevelType>(dimLevelTypes[l]));
+    cppLvlTypes.push_back(static_cast<DimLevelType>(lvlTypes[l]));
   return wrap(SparseTensorEncodingAttr::get(
-      unwrap(ctx), cppDimLevelTypes, unwrap(dimOrdering),
-      unwrap(higherOrdering), posWidth, crdWidth));
+      unwrap(ctx), cppLvlTypes, unwrap(dimOrdering), unwrap(higherOrdering),
+      posWidth, crdWidth));
 }

 MlirAffineMap mlirSparseTensorEncodingAttrGetDimOrdering(MlirAttribute attr) {
@@ -73,7 +72,7 @@ intptr_t mlirSparseTensorEncodingGetLvlRank(MlirAttribute attr) {
 }

 MlirSparseTensorDimLevelType
-mlirSparseTensorEncodingAttrGetDimLevelType(MlirAttribute attr, intptr_t lvl) {
+mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl) {
   return static_cast<MlirSparseTensorDimLevelType>(
       cast<SparseTensorEncodingAttr>(unwrap(attr)).getLvlType(lvl));
 }
48 changes: 23 additions & 25 deletions mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -130,23 +130,22 @@ Type SparseTensorEncodingAttr::getCrdType() const {
 }

 SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutOrdering() const {
-  return SparseTensorEncodingAttr::get(getContext(), getDimLevelType(),
-                                       AffineMap(), AffineMap(), getPosWidth(),
+  return SparseTensorEncodingAttr::get(getContext(), getLvlTypes(), AffineMap(),
+                                       AffineMap(), getPosWidth(),
                                        getCrdWidth());
 }

 SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutBitWidths() const {
-  return SparseTensorEncodingAttr::get(getContext(), getDimLevelType(),
-                                       getDimOrdering(), getHigherOrdering(), 0,
-                                       0);
+  return SparseTensorEncodingAttr::get(
+      getContext(), getLvlTypes(), getDimOrdering(), getHigherOrdering(), 0, 0);
 }

 bool SparseTensorEncodingAttr::isAllDense() const {
-  return !getImpl() || llvm::all_of(getDimLevelType(), isDenseDLT);
+  return !getImpl() || llvm::all_of(getLvlTypes(), isDenseDLT);
 }

 bool SparseTensorEncodingAttr::isAllOrdered() const {
-  return !getImpl() || llvm::all_of(getDimLevelType(), isOrderedDLT);
+  return !getImpl() || llvm::all_of(getLvlTypes(), isOrderedDLT);
 }

 bool SparseTensorEncodingAttr::hasIdDimOrdering() const {
@@ -155,14 +154,14 @@ bool SparseTensorEncodingAttr::hasIdDimOrdering() const {

 Level SparseTensorEncodingAttr::getLvlRank() const {
   assert(getImpl() && "Uninitialized SparseTensorEncodingAttr");
-  return getDimLevelType().size();
+  return getLvlTypes().size();
 }

 DimLevelType SparseTensorEncodingAttr::getLvlType(Level l) const {
   if (!getImpl())
     return DimLevelType::Dense;
   assert(l < getLvlRank() && "Level is out of bounds");
-  return getDimLevelType()[l];
+  return getLvlTypes()[l];
 }

 std::optional<uint64_t>
@@ -243,9 +242,8 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {

   StringRef attrName;
   // Exactly 6 keys.
-  SmallVector<StringRef, 6> keys = {"dimLevelType", "dimOrdering",
-                                    "higherOrdering", "posWidth",
-                                    "crdWidth", "slice"};
+  SmallVector<StringRef, 6> keys = {"lvlTypes", "dimOrdering", "higherOrdering",
+                                    "posWidth", "crdWidth", "slice"};
   while (succeeded(parser.parseOptionalKeyword(&attrName))) {
     if (!llvm::is_contained(keys, attrName)) {
       parser.emitError(parser.getNameLoc(), "unexpected key: ") << attrName;
@@ -258,7 +256,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
     // cost of the `is_contained` check above. Should instead use some
     // "find" function that returns the index into `keys` so that we can
     // dispatch on that instead.
-    if (attrName == "dimLevelType") {
+    if (attrName == "lvlTypes") {
       Attribute attr;
       RETURN_ON_FAIL(parser.parseAttribute(attr));
       auto arrayAttr = llvm::dyn_cast<ArrayAttr>(attr);
@@ -336,8 +334,8 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {

 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
   // Print the struct-like storage in dictionary fashion.
-  printer << "<{ dimLevelType = [ ";
-  llvm::interleaveComma(getDimLevelType(), printer, [&](DimLevelType dlt) {
+  printer << "<{ lvlTypes = [ ";
+  llvm::interleaveComma(getLvlTypes(), printer, [&](DimLevelType dlt) {
     printer << "\"" << toMLIRString(dlt) << "\"";
   });
   printer << " ]";
@@ -366,7 +364,7 @@ void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {

 LogicalResult SparseTensorEncodingAttr::verify(
     function_ref<InFlightDiagnostic()> emitError,
-    ArrayRef<DimLevelType> dimLevelType, AffineMap dimOrdering,
+    ArrayRef<DimLevelType> lvlTypes, AffineMap dimOrdering,
     AffineMap higherOrdering, unsigned posWidth, unsigned crdWidth,
     ArrayRef<SparseTensorDimSliceAttr> dimSlices) {
   if (!acceptBitWidth(posWidth))
@@ -378,7 +376,7 @@ LogicalResult SparseTensorEncodingAttr::verify(
   // the `getLvlRank` method is the length of the level-types array,
   // since it must always be provided and have full rank; therefore we
   // use that same source-of-truth here.
-  const Level lvlRank = dimLevelType.size();
+  const Level lvlRank = lvlTypes.size();
   if (lvlRank == 0)
     return emitError() << "expected a non-empty array for level types";
   if (dimOrdering) {
@@ -415,9 +413,9 @@ LogicalResult SparseTensorEncodingAttr::verifyEncoding(
     function_ref<InFlightDiagnostic()> emitError) const {
   // Check structural integrity. In particular, this ensures that the
   // level-rank is coherent across all the fields.
-  RETURN_FAILURE_IF_FAILED(verify(emitError, getDimLevelType(),
-                                  getDimOrdering(), getHigherOrdering(),
-                                  getPosWidth(), getCrdWidth(), getDimSlices()))
+  RETURN_FAILURE_IF_FAILED(verify(emitError, getLvlTypes(), getDimOrdering(),
+                                  getHigherOrdering(), getPosWidth(),
+                                  getCrdWidth(), getDimSlices()))
   // Check integrity with tensor type specifics. In particular, we
   // need only check that the dimension-rank of the tensor agrees with
   // the dimension-rank of the encoding.
@@ -496,14 +494,14 @@ RankedTensorType sparse_tensor::getCOOFromTypeWithOrdering(RankedTensorType rtt,
   // An unordered and non-unique compressed level at beginning.
   // If this is also the last level, then it is unique.
   lvlTypes.push_back(
-      *getDimLevelType(LevelFormat::Compressed, ordered, lvlRank == 1));
+      *buildLevelType(LevelFormat::Compressed, ordered, lvlRank == 1));
   if (lvlRank > 1) {
     // TODO: it is actually ordered at the level for ordered input.
     // Followed by unordered non-unique n-2 singleton levels.
     std::fill_n(std::back_inserter(lvlTypes), lvlRank - 2,
-                *getDimLevelType(LevelFormat::Singleton, ordered, false));
+                *buildLevelType(LevelFormat::Singleton, ordered, false));
     // Ends by a unique singleton level unless the lvlRank is 1.
-    lvlTypes.push_back(*getDimLevelType(LevelFormat::Singleton, ordered, true));
+    lvlTypes.push_back(*buildLevelType(LevelFormat::Singleton, ordered, true));
   }

   // TODO: Maybe pick the bitwidth based on input/output tensors (probably the
@@ -580,8 +578,8 @@ Level mlir::sparse_tensor::toStoredDim(RankedTensorType type, Dimension d) {
 static SparseTensorEncodingAttr
 getNormalizedEncodingForSpecifier(SparseTensorEncodingAttr enc) {
   SmallVector<DimLevelType> dlts;
-  for (auto dlt : enc.getDimLevelType())
-    dlts.push_back(*getDimLevelType(*getLevelFormat(dlt), true, true));
+  for (auto dlt : enc.getLvlTypes())
+    dlts.push_back(*buildLevelType(*getLevelFormat(dlt), true, true));

   return SparseTensorEncodingAttr::get(
       enc.getContext(), dlts,
6 changes: 2 additions & 4 deletions mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
@@ -79,11 +79,9 @@ class CodegenEnv {
   const LatPoint &lat(LatPointId l) const { return latticeMerger.lat(l); }
   ArrayRef<LatPointId> set(LatSetId s) const { return latticeMerger.set(s); }
   DimLevelType dlt(TensorId t, LoopId i) const {
-    return latticeMerger.getDimLevelType(t, i);
-  }
-  DimLevelType dlt(TensorLoopId b) const {
-    return latticeMerger.getDimLevelType(b);
+    return latticeMerger.getLvlType(t, i);
   }
+  DimLevelType dlt(TensorLoopId b) const { return latticeMerger.getLvlType(b); }

   //
   // LoopEmitter delegates.
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -288,7 +288,7 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
     if (stt.hasEncoding() && !(isOutputTensor(tid) && isSparseOut)) {
       const auto enc = stt.getEncoding();
       isSparseSlices[tid] = enc.isSlice();
-      for (auto lvlTp : enc.getDimLevelType())
+      for (auto lvlTp : enc.getLvlTypes())
         lvlTypes[tid].push_back(lvlTp);
     } else {
       lvlTypes[tid].assign(lvlRank, DimLevelType::Dense);
@@ -1159,7 +1159,7 @@ class SparseExtractSliceConverter
     // TODO: We should check these in ExtractSliceOp::verify.
     if (!srcEnc || !dstEnc || !dstEnc.isSlice())
       return failure();
-    assert(srcEnc.getDimLevelType() == dstEnc.getDimLevelType());
+    assert(srcEnc.getLvlTypes() == dstEnc.getLvlTypes());
     assert(srcEnc.getDimOrdering() == dstEnc.getDimOrdering());
     assert(srcEnc.getHigherOrdering() == dstEnc.getHigherOrdering());
     assert(srcEnc.getPosWidth() == dstEnc.getPosWidth());
@@ -205,7 +205,7 @@ static Value genLvlTypesBuffer(OpBuilder &builder, Location loc,
                                SparseTensorType stt) {
   SmallVector<Value> lvlTypes;
   lvlTypes.reserve(stt.getLvlRank());
-  for (const auto dlt : stt.getEncoding().getDimLevelType())
+  for (const auto dlt : stt.getEncoding().getLvlTypes())
     lvlTypes.push_back(constantDimLevelTypeEncoding(builder, loc, dlt));
   return allocaBuffer(builder, loc, lvlTypes);
 }
@@ -565,7 +565,7 @@ static void genSparseCOOIterationLoop(
   rewriter.setInsertionPointToStart(after);

   const bool hasDenseDim =
-      llvm::any_of(stt.getEncoding().getDimLevelType(), isDenseDLT);
+      llvm::any_of(stt.getEncoding().getLvlTypes(), isDenseDLT);
   if (hasDenseDim) {
     Value elemV = rewriter.create<memref::LoadOp>(loc, elemPtr);
     Value isZero = genIsNonzero(rewriter, loc, elemV);
@@ -880,11 +880,11 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
       break;
     case SparseToSparseConversionStrategy::kDirect:
       useDirectConversion = true;
-      assert(canUseDirectConversion(dstEnc.getDimLevelType()) &&
+      assert(canUseDirectConversion(dstEnc.getLvlTypes()) &&
              "Unsupported target for direct sparse-to-sparse conversion");
       break;
     case SparseToSparseConversionStrategy::kAuto:
-      useDirectConversion = canUseDirectConversion(dstEnc.getDimLevelType());
+      useDirectConversion = canUseDirectConversion(dstEnc.getLvlTypes());
       break;
     }
     if (useDirectConversion) {
@@ -896,7 +896,7 @@ class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
       // method calls can share most parameters, while still providing
      // the correct sparsity information to either of them.
      const auto mixedEnc = SparseTensorEncodingAttr::get(
-          op->getContext(), dstEnc.getDimLevelType(), dstEnc.getDimOrdering(),
+          op->getContext(), dstEnc.getLvlTypes(), dstEnc.getDimOrdering(),
          dstEnc.getHigherOrdering(), srcEnc.getPosWidth(),
          srcEnc.getCrdWidth());
      // TODO: This is the only place where `kToCOO` (or `kToIterator`)
