cyd0806 committed on
Commit
481470d
·
verified ·
1 Parent(s): 6d12472

Upload apex-master/csrc/megatron/scaled_upper_triang_masked_softmax.cpp with huggingface_hub

Browse files
apex-master/csrc/megatron/scaled_upper_triang_masked_softmax.cpp ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* coding=utf-8
2
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #include <cuda_fp16.h>
18
+ #include <torch/extension.h>
19
+ #include <vector>
20
+
namespace multihead_attn {
namespace fused_softmax {
namespace scaled_upper_triang_masked_softmax {

// Declarations only — implemented elsewhere (presumably the matching CUDA
// source for this extension; TODO confirm against the build file list).

// Forward CUDA kernel launcher: scales `input` by `scale_factor` and applies
// the upper-triangular-masked softmax.
torch::Tensor fwd_cuda(
    torch::Tensor const& input,
    float scale_factor);

// Backward CUDA kernel launcher: computes the input gradients from the
// incoming `output_grads` and the saved forward `softmax_results`.
torch::Tensor bwd_cuda(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor);
33
+
34
+ torch::Tensor fwd(torch::Tensor const& input, float scale_factor) {
35
+ TORCH_CHECK(input.dim() == 3, "expected 3D tensor");
36
+ TORCH_CHECK((input.scalar_type() == at::ScalarType::Half) ||
37
+ (input.scalar_type() == at::ScalarType::BFloat16),
38
+ "Only fp16 and bf16 are supported");
39
+
40
+ return fwd_cuda(input, scale_factor);
41
+ }
42
+
43
+ torch::Tensor bwd(
44
+ torch::Tensor const& output_grads,
45
+ torch::Tensor const& softmax_results,
46
+ float scale_factor) {
47
+
48
+ TORCH_CHECK(output_grads.dim() == 3, "expected 3D tensor");
49
+ TORCH_CHECK(softmax_results.dim() == 3, "expected 3D tensor");
50
+
51
+ TORCH_CHECK((output_grads.scalar_type() == at::ScalarType::Half) ||
52
+ (output_grads.scalar_type() == at::ScalarType::BFloat16),
53
+ "Only fp16 and bf16 are supported");
54
+ TORCH_CHECK((softmax_results.scalar_type() == at::ScalarType::Half) ||
55
+ (softmax_results.scalar_type() == at::ScalarType::BFloat16),
56
+ "Only fp16 and bf16 are supported");
57
+
58
+ return bwd_cuda(output_grads, softmax_results, scale_factor);
59
+ }
60
+
} // end namespace scaled_upper_triang_masked_softmax
} // end namespace fused_softmax
} // end namespace multihead_attn
64
+
65
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
66
+ m.def("forward",
67
+ &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::fwd,
68
+ "Self Multihead Attention scaled, time masked softmax -- Forward.", py::call_guard<py::gil_scoped_release>());
69
+ m.def("backward",
70
+ &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::bwd,
71
+ "Self Multihead Attention scaled, time masked softmax -- Backward.", py::call_guard<py::gil_scoped_release>());
72
+ }