
Commit 2a442f5

zdevito authored and facebook-github-bot committed
Revert D18499601: Add missing operators for PyText model.
Test Plan: revert-hammer

Differential Revision: D18499601

Original commit changeset: 8a38d3d809ee

fbshipit-source-id: 4f28f291bd7020f1fc9fc313bc766b5dbf5b1b90
1 parent c543034 · commit 2a442f5

2 files changed: +2 additions, -139 deletions

torch/csrc/jit/instruction.cpp

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ OpCode parseOpCode(const char *str) {
 
 bool isOpSupportedInMobile(OpCode op) {
   static constexpr OpCode supported_ops_in_mobile[] {
-      OP, OPN, LOAD, MOVE, STOREN, STORE, DROP, DROPR, LOADC, JF, JMP, LOOP, RET, GET_ATTR, SET_ATTR
+      OP, OPN, LOAD, MOVE, STOREN, STORE, DROP, DROPR, LOADC, JF, LOOP, RET, GET_ATTR, SET_ATTR
   };
 
   for (auto sop : supported_ops_in_mobile) {
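
For context, isOpSupportedInMobile is a plain linear scan over this constexpr table, so after the revert the mobile interpreter once again rejects bytecode containing JMP. A minimal self-contained sketch of that pattern (the enum below is an illustrative stand-in, not the real OpCode from torch/csrc/jit/instruction.h):

#include <cstdint>

// Illustrative stand-in for the OpCode enum in instruction.h.
enum OpCode : uint8_t { OP, OPN, LOAD, MOVE, JF, JMP, LOOP, RET };

// Same shape as isOpSupportedInMobile: linear scan over a constexpr table.
bool isSupported(OpCode op) {
  // JMP is deliberately absent, matching the post-revert table.
  static constexpr OpCode supported[] { OP, OPN, LOAD, MOVE, JF, LOOP, RET };
  for (auto sop : supported) {
    if (sop == op) {
      return true;
    }
  }
  return false;
}

With this table, isSupported(JMP) returns false while isSupported(LOOP) returns true.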

torch/csrc/jit/mobile/register_mobile_ops.cpp

Lines changed: 1 addition & 138 deletions
@@ -302,7 +302,7 @@ static auto registry0 = torch::RegisterOperators().op(
     []() {
     })
 ).op(
-    "_prim::ListConstruct.Tensor",
+    "_prim::ListConstruct.tensor",
     torch::RegisterOperators::options().catchAllKernel(
     []() {
     })
@@ -311,143 +311,6 @@ static auto registry0 = torch::RegisterOperators().op(
     torch::RegisterOperators::options().catchAllKernel(
     []() {
     })
-    // Pytext operators
-).op(
-    "_aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      constexpr int N = 5;
-      auto result_ = at::embedding(
-          (std::move(peek(*stack, 0, N))).toTensor(),
-          (std::move(peek(*stack, 1, N))).toTensor(),
-          (std::move(peek(*stack, 2, N))).toInt(),
-          (std::move(peek(*stack, 3, N))).toBool(),
-          (std::move(peek(*stack, 4, N))).toBool()
-      );
-      drop(*stack, N);
-      pack(*stack, std::move(result_));
-    })
-).op(
-    "_aten::dropout(Tensor input, float p, bool train) -> Tensor",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      auto result_ = at::dropout(
-          (std::move(peek(*stack, 0, 3))).toTensor(),
-          (std::move(peek(*stack, 1, 3))).toDouble(),
-          (std::move(peek(*stack, 2, 3))).toBool()
-      );
-      drop(*stack, 3);
-      pack(*stack, std::move(result_));
-    })
-).op(
-    "_aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      auto result_ = ((std::move(peek(*stack, 0, 2))).toTensor()).permute(
-          (std::move(peek(*stack, 1, 2))).toIntListRef()
-      );
-      drop(*stack, 2);
-      pack(*stack, std::move(result_));
-    }).aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)
-).op(
-    "_aten::matmul(Tensor self, Tensor other) -> Tensor",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      auto result_ = at::matmul(
-          (std::move(peek(*stack, 0, 2))).toTensor(),
-          (std::move(peek(*stack, 1, 2))).toTensor()
-      );
-      drop(*stack, 2);
-      pack(*stack, std::move(result_));
-    })
-).op(
-    "_aten::mul.Tensor(Tensor self, Tensor other) -> Tensor",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      auto result_ = at::mul(
-          (std::move(peek(*stack, 0, 2))).toTensor(),
-          (std::move(peek(*stack, 1, 2))).toTensor()
-      );
-      drop(*stack, 2);
-      pack(*stack, std::move(result_));
-    })
-).op(
-    "_aten::tanh(Tensor self) -> Tensor",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      auto result_ = at::tanh(
-          (std::move(peek(*stack, 0, 1))).toTensor()
-      );
-      drop(*stack, 1);
-      pack(*stack, std::move(result_));
-    })
-).op(
-    "_aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      auto result_ = at::max(
-          (std::move(peek(*stack, 0, 3))).toTensor(),
-          (std::move(peek(*stack, 1, 3))).toInt(),
-          (std::move(peek(*stack, 2, 3))).toBool()
-      );
-      drop(*stack, 3);
-      pack(*stack, std::move(result_));
-    })
-).op(
-    "_aten::cat(Tensor[] tensors, int dim=0) -> Tensor",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      auto result_ = at::cat(
-          (std::move(peek(*stack, 0, 2))).toTensorListRef(),
-          (std::move(peek(*stack, 1, 2))).toInt()
-      );
-      drop(*stack, 2);
-      pack(*stack, std::move(result_));
-    })
-).op(
-    "_aten::__is__(t1 self, t2 obj) -> bool",
-    torch::RegisterOperators::options().catchAllKernel(
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      c10::IValue self, obj;
-      pop(*stack, self, obj);
-      push(*stack, self.isSameIdentity(obj));
-    })
-).op(
-    "_aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      auto result_ = at::log_softmax(
-          (std::move(peek(*stack, 0, 3))).toTensor(),
-          (std::move(peek(*stack, 1, 3))).toInt(),
-          (std::move(peek(*stack, 2, 3))).toOptional<c10::ScalarType>()
-      );
-      drop(*stack, 3);
-      pack(*stack, std::move(result_));
-    })
-).op(
-    "_aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
-    torch::RegisterOperators::options().kernel(c10::TensorTypeId::CPUTensorId,
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      auto result_ = at::softmax(
-          (std::move(peek(*stack, 0, 3))).toTensor(),
-          (std::move(peek(*stack, 1, 3))).toInt(),
-          (std::move(peek(*stack, 2, 3))).toOptional<c10::ScalarType>()
-      );
-      drop(*stack, 3);
-      pack(*stack, std::move(result_));
-    })
-).op(
-    "_aten::warn() -> void",
-    torch::RegisterOperators::options().catchAllKernel(
-    [](c10::OperatorKernel* kernel, Stack* stack) {
-      drop(*stack, 1);
-      pop(*stack);
-    })
-).op(
-    "_prim::unchecked_cast",
-    torch::RegisterOperators::options().catchAllKernel(
-    []() {
-    })
 ).op(
     "_prim::TupleConstruct",
     torch::RegisterOperators::options().catchAllKernel(
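
All of the removed kernels follow the same stack calling convention: peek(*stack, i, N) reads the i-th of the top N arguments without popping, drop(*stack, N) pops the consumed arguments, and pack(*stack, result) pushes the result. A minimal sketch of that convention, with double standing in for c10::IValue and std::vector for the real Stack (simplified assumptions, not the actual ATen helpers):

#include <cstddef>
#include <utility>
#include <vector>

using IValue = double;              // stand-in for c10::IValue
using Stack = std::vector<IValue>;  // stand-in for torch::jit::Stack

// peek(stack, i, N): the i-th of the top N values, left in place.
IValue& peek(Stack& stack, std::size_t i, std::size_t N) {
  return *(stack.end() - N + i);
}

// drop: pop the top n values (the arguments just consumed).
void drop(Stack& stack, std::size_t n) {
  stack.erase(stack.end() - n, stack.end());
}

// pack: push the kernel's result back onto the stack.
void pack(Stack& stack, IValue&& v) {
  stack.push_back(std::move(v));
}

// A binary kernel in the same shape as the removed _aten::mul.Tensor entry:
// read both arguments, drop them, push the product.
void mulKernel(Stack& stack) {
  auto self = peek(stack, 0, 2);
  auto other = peek(stack, 1, 2);
  drop(stack, 2);
  pack(stack, self * other);
}

Called on a stack holding {2.0, 3.0}, mulKernel leaves {6.0}; the real kernels do the same dance with tensors, which is why each removed registration ends with drop followed by pack.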
