Skip to content

Commit 3b45f26

Browse files
committed
Update on "[PyTorch] Store Tensor explicitly in IValue"
Enables the following diff, which will make toTensor() return `const Tensor&` and allow callers to avoid refcounting overhead. Differential Revision: [D25324617](https://our.internmc.facebook.com/intern/diff/D25324617/) [ghstack-poisoned]
2 parents 56a6608 + 8ab072f commit 3b45f26

File tree

481 files changed

+8336
-3918
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

481 files changed

+8336
-3918
lines changed

.circleci/cimodel/data/dimensions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,8 @@
88
]
99

1010
ROCM_VERSIONS = [
11-
"3.9",
1211
"3.10",
12+
"4.0",
1313
]
1414

1515
ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]

.circleci/config.yml

Lines changed: 104 additions & 104 deletions
Large diffs are not rendered by default.

BUILD.bazel

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -373,7 +373,6 @@ filegroup(
373373
filegroup(
374374
name = "thc_srcs_cu",
375375
srcs = [
376-
"aten/src/THC/THCBlas.cu.cc",
377376
"aten/src/THC/THCReduceApplyUtils.cu.cc",
378377
"aten/src/THC/THCSleep.cu.cc",
379378
"aten/src/THC/THCSortUtils.cu.cc",

CONTRIBUTING.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -903,6 +903,16 @@ You'll need to install an appropriately configured flake8; see
903903
[Lint as you type](https://github.com/pytorch/pytorch/wiki/Lint-as-you-type)
904904
for documentation on how to do this.
905905

906+
If you haven't set up the pre-commit hook and have already committed files and
907+
CI reports `flake8` errors, you can run the check locally in your PR branch with:
908+
909+
```bash
910+
flake8 $(git diff --name-only $(git merge-base --fork-point master))
911+
```
912+
913+
fix the code so that no errors are reported when you re-run the above check,
914+
and then commit the fix.
915+
906916
## Building PyTorch with ASAN
907917
908918
[ASAN](https://github.com/google/sanitizers/wiki/AddressSanitizer) is very

android/gradle/android_tasks.gradle

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
21
import java.nio.file.Files
32
import java.nio.file.Paths
43
import java.io.FileOutputStream

android/pytorch_android/host/build.gradle

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,4 +38,3 @@ dependencies {
3838
}
3939

4040
apply from: rootProject.file('gradle/release.gradle')
41-

android/settings.gradle

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,4 +4,3 @@ project(':pytorch_android_torchvision').projectDir = file('pytorch_android_torch
44

55
project(':pytorch_host').projectDir = file('pytorch_android/host')
66
project(':test_app').projectDir = file('test_app/app')
7-

aten/src/ATen/ATen.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,3 +31,4 @@
3131
#include <c10/util/Exception.h>
3232
#include <ATen/core/UnsafeFromTH.h>
3333
#include <ATen/core/ivalue.h>
34+
#include <ATen/core/jit_type.h>

aten/src/ATen/LegacyTHFunctionsCUDA.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,6 @@ Tensor & _thnn_log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad
7575
Tensor _thnn_log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer);
7676
Tensor & _thnn_rrelu_with_noise_forward_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, c10::optional<at::Generator> generator);
7777
Tensor _thnn_rrelu_with_noise_forward(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, c10::optional<at::Generator> generator);
78-
Tensor & _thnn_rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training);
7978
Tensor _thnn_rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training);
8079
Tensor & _thnn_rrelu_with_noise_forward_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, c10::optional<at::Generator> generator);
8180
std::tuple<Tensor &,Tensor &,Tensor &> _thnn_conv2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);

aten/src/ATen/MemoryOverlap.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,9 @@ MemOverlapStatus get_overlap_status(TensorImpl* a, TensorImpl* b) {
4848
if (!a->is_contiguous() || !b->is_contiguous()) {
4949
return MemOverlapStatus::TOO_HARD;
5050
}
51+
if (!a->has_storage() || !b->has_storage()) {
52+
return MemOverlapStatus::NO;
53+
}
5154
if (a->storage().data() == b->storage().data()) {
5255
const auto a_begin = static_cast<char*>(a->data());
5356
const auto a_end = a_begin + a->numel() * a->itemsize();

0 commit comments

Comments (0)