[PyTorch] Store Tensor explicitly in IValue #48824

Update on "[PyTorch] Store Tensor explicitly in IValue"
Enables the following diff, which will make toTensor() return `const Tensor&` and allow callers to avoid refcounting overhead.

Differential Revision: [D25324617](https://our.internmc.facebook.com/intern/diff/D25324617/)

[ghstack-poisoned]
swolchok committed Dec 22, 2020
commit fd28d3f47a7369ab134891780a5aad147aee2284
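For context on the refcounting claim in the commit message, here is a minimal caller-side sketch. It is illustrative only and not code from this stack: the helper name numelOf and the IValue argument are hypothetical, and the `const Tensor&` return only exists once the follow-up diff lands.

#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>

// Hypothetical helper showing the caller-side difference.
int64_t numelOf(const c10::IValue& iv) {
  // Before the follow-up diff: toTensor() hands back a Tensor by value, so
  // this call pays an atomic refcount bump on the underlying TensorImpl.
  at::Tensor copy = iv.toTensor();

  // After the follow-up diff: toTensor() can return `const Tensor&` into the
  // IValue's own payload, so a read-only caller binds a reference and skips
  // the refcount traffic entirely.
  const at::Tensor& ref = iv.toTensor();
  return ref.numel();
}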
9 changes: 7 additions & 2 deletions aten/src/ATen/core/ivalue.h
@@ -791,7 +791,12 @@ struct TORCH_API IValue final {
   const void* internalToPointer() const {
     TORCH_INTERNAL_ASSERT(
         isPtrType(), "Can only call internalToPointer() for pointer types");
-    return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton() ? payload.u.as_intrusive_ptr : nullptr;
+    if (isTensor()) {
+      return payload.as_tensor.unsafeGetTensorImpl();
+    } else {
+      return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()
+          ? payload.u.as_intrusive_ptr : nullptr;
+    }
   }
 
   TypePtr type() const;
@@ -939,7 +944,7 @@ struct TORCH_API IValue final {
   friend struct WeakIValue;
 };
 
-struct CAFFE2_API WeakIValue final {
+struct TORCH_API WeakIValue final {
   WeakIValue() : tag(IValue::Tag::None), is_intrusive_ptr(false) {}
 
   WeakIValue(const WeakIValue& rhs)
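The payload.as_tensor / payload.u.as_intrusive_ptr split above suggests a payload union that keeps a non-trivially-copyable member next to the plain scalar/pointer payload. Below is a minimal standalone sketch of that general pattern, with std::string standing in for at::Tensor; the names and layout are illustrative, not the actual IValue definition.

#include <new>
#include <string>
#include <utility>

// Toy tagged value: either a trivially-copyable payload or a "heavy" object
// stored in place, discriminated by a tag the owner maintains.
class ToyValue {
 public:
  ToyValue() : tag_(Tag::Int) { payload_.u.as_int = 0; }

  explicit ToyValue(std::string s) : tag_(Tag::Str) {
    // Non-trivial member: construct it in place inside the union.
    new (&payload_.as_str) std::string(std::move(s));
  }

  ~ToyValue() {
    // The owner must destroy the non-trivial member explicitly.
    if (tag_ == Tag::Str) {
      payload_.as_str.~basic_string();
    }
  }

  ToyValue(const ToyValue&) = delete;
  ToyValue& operator=(const ToyValue&) = delete;

  const void* internalToPointer() const {
    // Mirrors the shape of the change above: the heavy member is read
    // directly; other tags go through the trivially-copyable payload.
    return tag_ == Tag::Str ? static_cast<const void*>(&payload_.as_str)
                            : static_cast<const void*>(&payload_.u);
  }

 private:
  enum class Tag { Int, Str };

  union Payload {
    struct Trivial {
      long as_int;
      const void* as_ptr;
    } u;
    std::string as_str;

    Payload() {}   // members are managed manually by ToyValue
    ~Payload() {}
  };

  Tag tag_;
  Payload payload_;
};

In the same spirit, the diff above branches on isTensor() to read payload.as_tensor directly, while every other tag keeps going through payload.u.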
107 changes: 107 additions & 0 deletions aten/src/ATen/test/ivalue_test.cpp
@@ -426,5 +426,112 @@ TEST(IValueTest, isAliasOf) {
   }
 }
 
+TEST(IValueTest, internalToPointer) {
+  IValue tensor(at::rand({3, 4}));
+  IValue str("hello");
+
+  EXPECT_EQ(tensor.internalToPointer(), tensor.unsafeToTensorImpl());
+  EXPECT_NE(str.internalToPointer(), nullptr);
+
+  IValue nullStr((c10::intrusive_ptr<ivalue::ConstantString>()));
+  ASSERT_TRUE(nullStr.isString());
+  EXPECT_EQ(nullStr.internalToPointer(), nullptr);
+}
+
+TEST(IValueTest, IdentityComparisonAndHashing) {
+  at::Tensor t1 = at::rand({3, 4});
+  at::Tensor t2 = at::rand({3, 4});
+  IValue tv1(t1), tv2(t2);
+  IValue tv1b(t1);
+
+  EXPECT_EQ(tv1.hash(), tv1b.hash());
+  EXPECT_NE(tv1.hash(), tv2.hash());
+
+  EXPECT_TRUE(tv1.is(tv1));
+  EXPECT_TRUE(tv1.is(tv1b));
+  EXPECT_TRUE(tv1b.is(tv1));
+  EXPECT_TRUE(tv2.is(tv2));
+
+  EXPECT_FALSE(tv1.is(tv2));
+  EXPECT_FALSE(tv2.is(tv1));
+
+  IValue none;
+  IValue undefinedTensor((at::Tensor()));
+
+  EXPECT_TRUE(none.is(undefinedTensor));
+  EXPECT_TRUE(undefinedTensor.is(none));
+
+  // Is this a bug? We should probably have a is b => a.hash() == b.hash()
+  EXPECT_NE(none.hash(), undefinedTensor.hash());
+
+  auto sampleIValues = makeSampleIValues();
+  auto sampleIValues2 = makeSampleIValues();
+  auto moreSampleIValues = makeMoreSampleIValues();
+
+  ASSERT_EQ(sampleIValues.size(), moreSampleIValues.size());
+  for (int ii = 0; ii < sampleIValues.size(); ++ii) {
+    // Constant strings will have the same pointer value.
+    if (sampleIValues[ii].isPtrType() && !sampleIValues[ii].isString()) {
+      EXPECT_NE(sampleIValues[ii].hash(), sampleIValues2[ii].hash());
+    } else {
+      EXPECT_EQ(sampleIValues[ii].hash(), sampleIValues2[ii].hash());
+    }
+    EXPECT_NE(sampleIValues[ii].hash(), moreSampleIValues[ii].hash());
+  }
+}
+
+TEST(IValueTest, getSubValues) {
+  // Scalars have no subvalues.
+  IValue integer(42), float_(1.5);
+
+  IValue::HashAliasedIValues subvalues;
+
+  integer.getSubValues(subvalues);
+  EXPECT_TRUE(subvalues.empty());
+
+  subvalues.clear();
+
+  float_.getSubValues(subvalues);
+  EXPECT_TRUE(subvalues.empty());
+
+  subvalues.clear();
+
+  at::Tensor t1(at::rand({3, 4})), t2(at::rand({3, 4}));
+  IValue tv1(t1), tv2(t2);
+  IValue list(std::vector<at::Tensor>{t1, t2});
+  IValue tuple(ivalue::Tuple::create({tv1, tv2}));
+
+  std::unordered_map<int64_t, at::Tensor> m;
+  m[1] = t1;
+  m[2] = t2;
+
+  IValue dict(std::move(m));
+
+  auto objType = ClassType::create(nullopt, {});
+  objType->addAttribute("t1", tv1.type());
+  objType->addAttribute("t2", tv2.type());
+
+  auto o = ivalue::Object::create(StrongTypePtr(0, objType), 2);
+  o->setSlot(0, tv1);
+  o->setSlot(1, tv2);
+
+  IValue object(o);
+  tv1.getSubValues(subvalues);
+  EXPECT_EQ(subvalues.size(), 1);
+  EXPECT_EQ(subvalues.count(tv1), 1);
+
+  subvalues.clear();
+
+  for (auto& container: {list, tuple, dict, object}) {
+    container.getSubValues(subvalues);
+    EXPECT_EQ(subvalues.size(), 3);
+    EXPECT_EQ(subvalues.count(container), 1);
+    EXPECT_EQ(subvalues.count(tv1), 1);
+    EXPECT_EQ(subvalues.count(tv2), 1);
+
+    subvalues.clear();
+  }
+}
+
 // TODO(gmagogsfm): Add type conversion test?
 } // namespace c10