// Subset of torch/csrc/autograd/generated/VariableTypeEverything.cpp
#include "torch/csrc/autograd/VariableTypeUtils.h"

#include <ATen/TypeDefault.h>
#include <ATen/core/op_registration/op_registration.h>

// @generated from tools/autograd/templates/VariableType.cpp

// NOTE [Sharded File]: on this file's split-into-shards state
//
// Back in the good old days, VariableType.cpp was generated as one
// file with every function in it, and everything was great and
// simple.
//
// However, this file was also very large (over 36,000 lines), and
// compiling it was very slow, and in fact was a significant
// bottleneck for incremental rebuilds. To address this, we now
// generate the file split across multiple shards, named
// VariableType_0.cpp and so on, which can be compiled in parallel.
//
// For ease of inspection and debugging, so that it's not necessary to
// go rooting around in multiple files, we also generate all the
// functions together in VariableTypeEverything.cpp. This generated
// file is only for convenience; it's not actually used in the
// build. If the file you're looking at now is one of the shards, you
// may want to switch over to the Everything variant to make your
// grepping smoother.

using namespace at;
using namespace torch::autograd::generated;

namespace torch { namespace autograd {

namespace VariableType {
namespace {
Tensor mul(const Tensor & self, const Tensor & other) {
  RECORD_FUNCTION("mul", std::vector<c10::IValue>({self, other}), Node::peek_at_next_sequence_nr());
  auto& self_ = unpack(self, "self", 0);
  auto& other_ = unpack(other, "other", 1);
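  // Build the backward node if any input requires grad. MulBackward0
  // saves whichever inputs its gradient formulas need: the gradient
  // w.r.t. other (output 1) uses self, and the gradient w.r.t. self
  // (output 0) uses other.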
  std::shared_ptr<MulBackward0> grad_fn;
  if (compute_requires_grad( self, other )) {
    grad_fn = std::shared_ptr<MulBackward0>(new MulBackward0(), deleteNode);
    grad_fn->set_next_edges(collect_next_edges( self, other ));
    if (grad_fn->should_compute_output(1)) {
      grad_fn->self_ = SavedVariable(self, false);
    }
    if (grad_fn->should_compute_output(0)) {
      grad_fn->other_ = SavedVariable(other, false);
    }
  }
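  // If the JIT tracer is active, record an aten::mul node with its
  // inputs, then suspend tracing so the underlying kernel call below
  // is not traced a second time.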
  torch::jit::Node* node = nullptr;
  std::shared_ptr<jit::tracer::TracingState> tracer_state;
  if (jit::tracer::isTracing()) {
    tracer_state = jit::tracer::getTracingState();
    at::Symbol op_name;
    op_name = jit::Symbol::fromQualString("aten::mul");
    node = tracer_state->graph->create(op_name, /*num_outputs=*/0);
    jit::tracer::recordSourceLocation(node);
    jit::tracer::addInputs(node, "self", self);
    jit::tracer::addInputs(node, "other", other);
    tracer_state->graph->insertNode(node);

    jit::tracer::setTracingState(nullptr);
  }
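  // Debug builds remember each input's storage and TensorImpl so the
  // asserts after the call can check that the kernel did not swap
  // them out.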
  #ifndef NDEBUG
  c10::optional<Storage> self__storage_saved =
      self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> self__impl_saved;
  if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
  c10::optional<Storage> other__storage_saved =
      other_.has_storage() ? c10::optional<Storage>(other_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> other__impl_saved;
  if (other_.defined()) other__impl_saved = other_.getIntrusivePtr();
  #endif
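  // Dispatch to the non-variable (base) kernel with the autograd
  // dispatch disabled, so this wrapper is not re-entered.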
  auto tmp = ([&]() {
    at::AutoNonVariableTypeMode non_var_type_mode(true);
    return at::mul(self_, other_);
  })();
  auto result = std::move(tmp);
  #ifndef NDEBUG
  if (self__storage_saved.has_value())
    AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
  if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
  if (other__storage_saved.has_value())
    AT_ASSERT(other__storage_saved.value().is_alias_of(other_.storage()));
  if (other__impl_saved) AT_ASSERT(other__impl_saved == other_.getIntrusivePtr());
  #endif
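  // Attach the backward node to the result, then resume tracing and
  // record the traced output.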
  if (grad_fn) {
    set_history(flatten_tensor_args( result ), grad_fn);
  }
  if (tracer_state) {
    jit::tracer::setTracingState(std::move(tracer_state));
    jit::tracer::addOutput(node, result);
  }
  return result;
}

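// add and sub follow the same pattern as mul above. Their backward
// nodes only need the scalar alpha: for self + alpha * other the
// gradient w.r.t. self is 1 and w.r.t. other is alpha (negated for
// sub), so neither input tensor has to be saved.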
Tensor add(const Tensor & self, const Tensor & other, Scalar alpha) {
  RECORD_FUNCTION("add", std::vector<c10::IValue>({self, other, alpha}), Node::peek_at_next_sequence_nr());
  auto& self_ = unpack(self, "self", 0);
  auto& other_ = unpack(other, "other", 1);
  std::shared_ptr<AddBackward0> grad_fn;
  if (compute_requires_grad( self, other )) {
    grad_fn = std::shared_ptr<AddBackward0>(new AddBackward0(), deleteNode);
    grad_fn->set_next_edges(collect_next_edges( self, other ));
    grad_fn->alpha = alpha;
  }
  torch::jit::Node* node = nullptr;
  std::shared_ptr<jit::tracer::TracingState> tracer_state;
  if (jit::tracer::isTracing()) {
    tracer_state = jit::tracer::getTracingState();
    at::Symbol op_name;
    op_name = jit::Symbol::fromQualString("aten::add");
    node = tracer_state->graph->create(op_name, /*num_outputs=*/0);
    jit::tracer::recordSourceLocation(node);
    jit::tracer::addInputs(node, "self", self);
    jit::tracer::addInputs(node, "other", other);
    jit::tracer::addInputs(node, "alpha", alpha);
    tracer_state->graph->insertNode(node);

    jit::tracer::setTracingState(nullptr);
  }
  #ifndef NDEBUG
  c10::optional<Storage> self__storage_saved =
      self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> self__impl_saved;
  if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
  c10::optional<Storage> other__storage_saved =
      other_.has_storage() ? c10::optional<Storage>(other_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> other__impl_saved;
  if (other_.defined()) other__impl_saved = other_.getIntrusivePtr();
  #endif
  auto tmp = ([&]() {
    at::AutoNonVariableTypeMode non_var_type_mode(true);
    return at::add(self_, other_, alpha);
  })();
  auto result = std::move(tmp);
  #ifndef NDEBUG
  if (self__storage_saved.has_value())
    AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
  if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
  if (other__storage_saved.has_value())
    AT_ASSERT(other__storage_saved.value().is_alias_of(other_.storage()));
  if (other__impl_saved) AT_ASSERT(other__impl_saved == other_.getIntrusivePtr());
  #endif
  if (grad_fn) {
    set_history(flatten_tensor_args( result ), grad_fn);
  }
  if (tracer_state) {
    jit::tracer::setTracingState(std::move(tracer_state));
    jit::tracer::addOutput(node, result);
  }
  return result;
}

Tensor sub(const Tensor & self, const Tensor & other, Scalar alpha) {
  RECORD_FUNCTION("sub", std::vector<c10::IValue>({self, other, alpha}), Node::peek_at_next_sequence_nr());
  auto& self_ = unpack(self, "self", 0);
  auto& other_ = unpack(other, "other", 1);
  std::shared_ptr<SubBackward0> grad_fn;
  if (compute_requires_grad( self, other )) {
    grad_fn = std::shared_ptr<SubBackward0>(new SubBackward0(), deleteNode);
    grad_fn->set_next_edges(collect_next_edges( self, other ));
    grad_fn->alpha = alpha;
  }
  torch::jit::Node* node = nullptr;
  std::shared_ptr<jit::tracer::TracingState> tracer_state;
  if (jit::tracer::isTracing()) {
    tracer_state = jit::tracer::getTracingState();
    at::Symbol op_name;
    op_name = jit::Symbol::fromQualString("aten::sub");
    node = tracer_state->graph->create(op_name, /*num_outputs=*/0);
    jit::tracer::recordSourceLocation(node);
    jit::tracer::addInputs(node, "self", self);
    jit::tracer::addInputs(node, "other", other);
    jit::tracer::addInputs(node, "alpha", alpha);
    tracer_state->graph->insertNode(node);

    jit::tracer::setTracingState(nullptr);
  }
  #ifndef NDEBUG
  c10::optional<Storage> self__storage_saved =
      self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> self__impl_saved;
  if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
  c10::optional<Storage> other__storage_saved =
      other_.has_storage() ? c10::optional<Storage>(other_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> other__impl_saved;
  if (other_.defined()) other__impl_saved = other_.getIntrusivePtr();
  #endif
  auto tmp = ([&]() {
    at::AutoNonVariableTypeMode non_var_type_mode(true);
    return at::sub(self_, other_, alpha);
  })();
  auto result = std::move(tmp);
  #ifndef NDEBUG
  if (self__storage_saved.has_value())
    AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
  if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
  if (other__storage_saved.has_value())
    AT_ASSERT(other__storage_saved.value().is_alias_of(other_.storage()));
  if (other__impl_saved) AT_ASSERT(other__impl_saved == other_.getIntrusivePtr());
  #endif
  if (grad_fn) {
    set_history(flatten_tensor_args( result ), grad_fn);
  }
  if (tracer_state) {
    jit::tracer::setTracingState(std::move(tracer_state));
    jit::tracer::addOutput(node, result);
  }
  return result;
}

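// This sum overload reduces over all elements; its backward node
// saves only the input's sizes so the incoming gradient can be
// expanded back to self's shape.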
Tensor sum(const Tensor & self, c10::optional<ScalarType> dtype) {
  RECORD_FUNCTION("sum", std::vector<c10::IValue>({self}), Node::peek_at_next_sequence_nr());
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<SumBackward0> grad_fn;
  if (compute_requires_grad( self )) {
    grad_fn = std::shared_ptr<SumBackward0>(new SumBackward0(), deleteNode);
    grad_fn->set_next_edges(collect_next_edges( self ));
    grad_fn->self_sizes = self.sizes().vec();
  }
  torch::jit::Node* node = nullptr;
  std::shared_ptr<jit::tracer::TracingState> tracer_state;
  if (jit::tracer::isTracing()) {
    tracer_state = jit::tracer::getTracingState();
    at::Symbol op_name;
    op_name = jit::Symbol::fromQualString("aten::sum");
    node = tracer_state->graph->create(op_name, /*num_outputs=*/0);
    jit::tracer::recordSourceLocation(node);
    jit::tracer::addInputs(node, "self", self);
    jit::tracer::addInputs(node, "dtype", dtype);
    tracer_state->graph->insertNode(node);

    jit::tracer::setTracingState(nullptr);
  }
  #ifndef NDEBUG
  c10::optional<Storage> self__storage_saved =
      self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> self__impl_saved;
  if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
  #endif
  auto tmp = ([&]() {
    at::AutoNonVariableTypeMode non_var_type_mode(true);
    return at::sum(self_, dtype);
  })();
  auto result = std::move(tmp);
  #ifndef NDEBUG
  if (self__storage_saved.has_value())
    AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
  if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
  #endif
  if (grad_fn) {
    set_history(flatten_tensor_args( result ), grad_fn);
  }
  if (tracer_state) {
    jit::tracer::setTracingState(std::move(tracer_state));
    jit::tracer::addOutput(node, result);
  }
  return result;
}

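// pow saves the input tensor and the scalar exponent, since
// d(self^exponent)/dself = exponent * self^(exponent - 1).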
Tensor pow(const Tensor & self, Scalar exponent) {
  RECORD_FUNCTION("pow", std::vector<c10::IValue>({self, exponent}), Node::peek_at_next_sequence_nr());
  auto& self_ = unpack(self, "self", 0);
  std::shared_ptr<PowBackward0> grad_fn;
  if (compute_requires_grad( self )) {
    grad_fn = std::shared_ptr<PowBackward0>(new PowBackward0(), deleteNode);
    grad_fn->set_next_edges(collect_next_edges( self ));
    grad_fn->self_ = SavedVariable(self, false);
    grad_fn->exponent = exponent;
  }
  torch::jit::Node* node = nullptr;
  std::shared_ptr<jit::tracer::TracingState> tracer_state;
  if (jit::tracer::isTracing()) {
    tracer_state = jit::tracer::getTracingState();
    at::Symbol op_name;
    op_name = jit::Symbol::fromQualString("aten::pow");
    node = tracer_state->graph->create(op_name, /*num_outputs=*/0);
    jit::tracer::recordSourceLocation(node);
    jit::tracer::addInputs(node, "self", self);
    jit::tracer::addInputs(node, "exponent", exponent);
    tracer_state->graph->insertNode(node);

    jit::tracer::setTracingState(nullptr);
  }
  #ifndef NDEBUG
  c10::optional<Storage> self__storage_saved =
      self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> self__impl_saved;
  if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
  #endif
  auto tmp = ([&]() {
    at::AutoNonVariableTypeMode non_var_type_mode(true);
    return at::pow(self_, exponent);
  })();
  auto result = std::move(tmp);
  #ifndef NDEBUG
  if (self__storage_saved.has_value())
    AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
  if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
  #endif
  if (grad_fn) {
    set_history(flatten_tensor_args( result ), grad_fn);
  }
  if (tracer_state) {
    jit::tracer::setTracingState(std::move(tracer_state));
    jit::tracer::addOutput(node, result);
  }
  return result;
}

} // anonymous namespace
} // namespace VariableType
}} // namespace torch::autograd

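// A minimal usage sketch (not part of the generated file): from the
// C++ frontend, any op on a tensor that requires grad is routed
// through wrappers like the ones above, which record the backward
// graph that backward() later walks.
//
//   auto a = torch::ones({2, 2}, torch::requires_grad());
//   auto b = torch::ones({2, 2});
//   auto c = (a * b + b).pow(2).sum();  // mul, add, pow, sum above
//   c.backward();                       // runs the recorded grad_fns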