Commit 43621bc

bertmaher authored and facebook-github-bot committed
Benchmark int4 gemm implementations (#2261)
Summary: Focusing on llama2-70b inference, this compares tinygemm (`_weight_int4pack_mm`) to a Triton implementation. Note that the two are not numerically equivalent yet, because the Triton implementation does not apply the scale and zero point. TODO!

Pull Request resolved: #2261

Test Plan:
```
python run_benchmark.py triton --op int4_gemm
```

Performance in TFLOPS (although latency, or perhaps bandwidth, would be a better metric for the decoding sizes):
```
int4-gemm-performance:
        B       m       n       k   tinygemm      triton
0     1.0     1.0  1280.0  8192.0   1.318632    0.146058
1     1.0     1.0  7168.0  8192.0   3.097060    0.763632
2     1.0     1.0  8192.0  1024.0   1.533006    0.279546
3     1.0     1.0  8192.0  3584.0   2.702516    0.842907
4     1.0  4096.0  1280.0  8192.0  29.048470  160.250400
5     1.0  4096.0  7168.0  8192.0  29.133376  260.258231
6     1.0  4096.0  8192.0  1024.0  27.168206  244.825143
7     1.0  4096.0  8192.0  3584.0  28.823074  251.058619
8     4.0     1.0  1280.0  8192.0   5.002748    0.582154
9     4.0     1.0  7168.0  8192.0  12.042711    3.029316
10    4.0     1.0  8192.0  1024.0   5.991863    1.152598
11    4.0     1.0  8192.0  3584.0  10.309034    3.343978
12    4.0  4096.0  1280.0  8192.0  29.119370  254.743029
13    4.0  4096.0  7168.0  8192.0  29.114149  261.838071
14    4.0  4096.0  8192.0  1024.0  27.197883  262.376217
15    4.0  4096.0  8192.0  3584.0  28.852035  255.000142
16   16.0     1.0  1280.0  8192.0  12.834468    2.328100
17   16.0     1.0  7168.0  8192.0  24.755590   12.119764
18   16.0     1.0  8192.0  1024.0  15.650388    4.454917
19   16.0     1.0  8192.0  3584.0  21.683995   13.388112
20   16.0  4096.0  1280.0  8192.0  29.148757  260.059109
21   16.0  4096.0  7168.0  8192.0  29.150796  269.369526
22   16.0  4096.0  8192.0  1024.0  27.245403  272.193003
23   16.0  4096.0  8192.0  3584.0  29.095535  263.722304
24   64.0     1.0  1280.0  8192.0  20.799920    9.287653
25   64.0     1.0  7168.0  8192.0  27.319690   38.018942
26   64.0     1.0  8192.0  1024.0  22.919694   17.567766
27   64.0     1.0  8192.0  3584.0  26.492333   42.643613
28   64.0  4096.0  1280.0  8192.0  29.424380  265.626026
29   64.0  4096.0  7168.0  8192.0  29.391893  260.692446
30   64.0  4096.0  8192.0  1024.0  27.452366  269.505851
31   64.0  4096.0  8192.0  3584.0  29.098774  252.047059
```

Reviewed By: xuzhao9

Differential Revision: D57217321

Pulled By: bertmaher

fbshipit-source-id: 3cc24d2cf57c8277189799eb1a61bb9a1d8ca5e3
1 parent c9f6193 commit 43621bc
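The numerical gap noted in the summary comes from skipping dequantization in the Triton path. As a rough illustration only (not part of this commit, and not necessarily the exact convention tinygemm uses), a group-wise int4 weight paired with a `(K // group_size, N, 2)` scales-and-zeros tensor, shaped as in the benchmark's input generator, could be dequantized like this:

```python
import torch

def dequantize_int4(w_int4, scales_and_zeros, group_size=32):
    # w_int4: (K, N) tensor holding int4-range values in [-8, 7].
    # scales_and_zeros: (K // group_size, N, 2) bf16; here we *assume*
    # [..., 0] is the per-group scale and [..., 1] is the zero point.
    K, N = w_int4.shape
    scales = scales_and_zeros[..., 0]  # (K // group_size, N)
    zeros = scales_and_zeros[..., 1]   # (K // group_size, N)
    w = w_int4.to(torch.bfloat16).reshape(K // group_size, group_size, N)
    w = w * scales[:, None, :] + zeros[:, None, :]
    return w.reshape(K, N)
```

Folding this per-group multiply-add into the kernel's unpacking step is presumably what the TODO refers to.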

File tree: 3 files changed, +312 −0 lines changed

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
from .int4_gemm import Operator
Lines changed: 145 additions & 0 deletions
@@ -0,0 +1,145 @@
"""
Compute a bf16 (activation) x int4 (weight) gemm.
Inspired by [gpt-fast](https://github.com/pytorch-labs/gpt-fast)
ATen kernels from tinygemm
Triton implementation by @jlebar: https://gist.github.com/jlebar/3435b2c00deea53258887ce37231e5e2
"""

import argparse
import os
import statistics
import torch
import triton.ops
import triton.language as tl

from typing import Any

from torchbenchmark.util.triton_op import (
    BenchmarkOperator,
    BenchmarkOperatorMetrics,
    register_benchmark,
    register_metric,
)

from .kernel import pack_2xint4, matmul, matmul_kernel


class Operator(BenchmarkOperator):
    DEFAULT_METRICS = ["tflops", "gbps", "latency"]

    def __init__(self, mode, device, extra_args):
        super().__init__(mode=mode, device=device, extra_args=extra_args)
        # `Group size` and `inner K tiles` are defaults from gpt-fast.
        self.group_size = 32
        self.inner_k_tiles = 8

    def get_input_iter(self):
        def args(B, L, Dout, Din):
            x = torch.randn(B, L, Din, device=self.device, dtype=torch.bfloat16)
            w = torch.randint(-8, 7, (Din, Dout), device=self.device, dtype=torch.int32)
            scales_and_zeros = torch.randn(
                Din // self.group_size,
                Dout,
                2,
                device=self.device,
                dtype=torch.bfloat16,
            )
            return (x, w, scales_and_zeros)

        # Llama-2 shapes w/ 8-way tensor parallelism.
        name_to_shapes_70b = {
            "attn.wqkv": (8192, 1280),
            "attn.w0": (1024, 8192),
            "ffn.w13": (8192, 7168),
            "ffn.w2": (3584, 8192),
        }
        for seq_len in (1, 4096):
            for bsz in (1, 4, 16, 64):
                for name, (k, n) in name_to_shapes_70b.items():
                    yield args(bsz, seq_len, n, k)

    def get_x_val(self, example_inputs) -> float:
        x, w, scales_and_zeros = example_inputs
        B, m, k = x.size()
        _, n = w.size()
        return (B, m, n, k)

    @register_benchmark(baseline=True)
    def tinygemm(self, x, w, scales_and_zeros):
        x = x.reshape(-1, x.size(-1))
        w_int4 = torch.ops.aten._convert_weight_to_int4pack(
            w.T.contiguous(), self.inner_k_tiles
        )
        return lambda: torch.ops.aten._weight_int4pack_mm(
            x, w_int4, self.group_size, scales_and_zeros
        )

    @register_benchmark()
    def triton(self, x, w, scales_and_zeros):
        x = x.reshape(-1, x.size(-1))
        w_int4 = pack_2xint4(w).T.contiguous().T
        return lambda: matmul(x, w_int4)

    @register_metric()
    def best_config(self, fn, inputs, metrics):
        if "triton" in str(fn):
            return str(matmul_kernel.best_config)
        return ""

    @register_metric()
    def gbps(self, fn, example_inputs: Any, metrics: BenchmarkOperatorMetrics) -> float:
        def nbytes(t):
            return t.numel() * t.element_size()

        x, w, scale_and_zero = example_inputs
        c = fn()

        gb = (sum(nbytes(t) for t in (x, scale_and_zero, c)) + nbytes(w) // 8) / 1e9
        return list(map(lambda ms: gb / ms * 1e3, metrics.latency))

    @register_metric()
    def tflops(
        self, fn_name: str, example_inputs: Any, metrics: BenchmarkOperatorMetrics
    ) -> float:
        a, b, _ = example_inputs
        B, m, k = a.size()
        m = B * m
        _, n = b.size()
        flops = 2 * m * n * k
        return [flops / x / 1e12 * 1e3 for x in metrics.latency]

    def plot(self):
        @triton.testing.perf_report(
            triton.testing.Benchmark(
                x_names=[
                    "B",
                    "m",
                    "n",
                    "k",
                ],  # argument names to use as an x-axis for the plot
                x_vals=self.output.x_vals,  # different possible values for `x_name`
                line_arg="provider",  # argument name whose value corresponds to a different line in the plot
                line_vals=[
                    "tinygemm",
                    "triton",
                ],  # possible values for `line_arg`
                line_names=[
                    "tinygemm",
                    "triton",
                ],  # label name for the lines
                styles=[("blue", "-"), ("green", "-")],
                ylabel="tflops",  # label name for the y-axis
                plot_name="int4-gemm-performance",  # name for the plot. Used also as a file name for saving the plot.
                args={},  # values for function arguments not in `x_names` and `y_name`
            )
        )
        def _plot(B, m, n, k, provider):
            tflops = self.output.get_y_vals((B, m, n, k), provider, "tflops")
            return tflops

        save_path = "/tmp/int4_gemm"

        if not os.path.exists(save_path):
            os.mkdir(save_path)

        _plot.run(show_plots=True, print_data=True, save_path=save_path)
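Outside the benchmark harness, the baseline path can be exercised directly by mirroring the calls in `tinygemm()` above. The snippet below is a minimal standalone sketch, not part of this commit; it assumes a CUDA device with bf16 support, and note that these `aten` ops are private, so their expected dtypes and layouts may vary across PyTorch versions.

```python
import torch

group_size, inner_k_tiles = 32, 8  # gpt-fast defaults, as in the operator above
B, L, K, N = 4, 1, 8192, 1280      # one decoding-shaped input from the benchmark ("attn.wqkv")

x = torch.randn(B * L, K, device="cuda", dtype=torch.bfloat16)
w = torch.randint(-8, 7, (K, N), device="cuda", dtype=torch.int32)
scales_and_zeros = torch.randn(
    K // group_size, N, 2, device="cuda", dtype=torch.bfloat16
)

# Pack the weight into tinygemm's int4 layout, then run the int4 mm.
w_int4 = torch.ops.aten._convert_weight_to_int4pack(w.T.contiguous(), inner_k_tiles)
out = torch.ops.aten._weight_int4pack_mm(x, w_int4, group_size, scales_and_zeros)
print(out.shape)  # torch.Size([4, 1280])
```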
Lines changed: 166 additions & 0 deletions
@@ -0,0 +1,166 @@
"""
Triton implementation by @jlebar: https://gist.github.com/jlebar/3435b2c00deea53258887ce37231e5e2
"""

import torch
import triton
import triton.language as tl

AUTOTUNE_CONFIGS = [
    triton.Config(
        {
            "BLOCK_SIZE_M": 16,
            "BLOCK_SIZE_N": 128,
            "BLOCK_SIZE_K": 256,
            "GROUP_SIZE_M": 32,
        },
        num_stages=4,
        num_warps=4,
    ),
    triton.Config(
        {
            "BLOCK_SIZE_M": 128,
            "BLOCK_SIZE_N": 256,
            "BLOCK_SIZE_K": 128,
            "GROUP_SIZE_M": 32,
        },
        num_stages=4,
        num_warps=8,
    ),
]


@triton.autotune(configs=AUTOTUNE_CONFIGS, key=["M", "N", "K"])
@triton.jit
def matmul_kernel(
    # Pointers to matrices
    a_ptr,
    b_ptr,
    c_ptr,
    # Matrix dimensions.
    M,
    N,
    K,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
    # by to get the element one row down (A has M rows).
    #
    # We assume `b` is packed with 2 `int4` elements per K, i.e. it's a
    # (K//2)xNx(2xint4) matrix, represented in Triton as (K//2)xNxi8. If K
    # is the minor dimension, then stride_bk should logically be 0.5. But
    # we don't want a fractional stride! So let the given stride be the
    # stride per 2xint4.
    stride_am,
    stride_ak,
    stride_bk,
    stride_bn,
    # Meta-parameters
    stride_cm,
    stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
):
    """Kernel for computing the matmul C = A x B.
    A has shape (M, K), B has shape (K, N) and C has shape (M, N).
    """
    tl.device_assert(K % BLOCK_SIZE_K == 0)

    # -----------------------------------------------------------
    # Map program ids `pid` to the block of C it should compute.
    # This is done in a grouped ordering to promote L2 data reuse.
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + (pid % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m

    # ----------------------------------------------------------
    # Create pointers for the first blocks of A and B.
    # We will advance these pointers as we move in the K direction
    # and accumulate.
    # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
    # `b_ptrs` is a block of [BLOCK_SIZE_K // 2, BLOCK_SIZE_N] pointers
    offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    offs_ak = tl.arange(0, BLOCK_SIZE_K)
    offs_bk = tl.arange(0, BLOCK_SIZE_K // 2)
    a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_ak[None, :] * stride_ak)
    b_ptrs = b_ptr + (offs_bk[:, None] * stride_bk + offs_bn[None, :] * stride_bn)

    # -----------------------------------------------------------
    # Iterate to compute a block of the C matrix.
    # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
    # of fp32 values for higher accuracy.
    # `accumulator` will be converted back to bf16 after the loop.
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        a = tl.load(a_ptrs, mask=offs_ak[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
        b = tl.load(b_ptrs)
        tl.static_assert(b.dtype == tl.int8)

        # Unpack `b` into a bf16 matrix, taking care to sign-extend b_lo. Use
        # _4_i8 because the literal "4" is considered an i32, which causes the
        # shift operands to be widened to i32.
        _4_i8 = tl.full((1,), 4, dtype=tl.int8)
        b_lo = (b << _4_i8) >> _4_i8
        b_hi = b >> _4_i8
        # Workaround: Convert before the join() so that Triton can load the data
        # after the join using ldmatrix.
        b_f16 = (
            tl.join(b_lo.to(tl.bfloat16), b_hi.to(tl.bfloat16))
            .permute(0, 2, 1)
            .reshape(BLOCK_SIZE_K, BLOCK_SIZE_N)
        )

        accumulator += tl.dot(a, b_f16)
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk // 2

    c = accumulator.to(tl.bfloat16)

    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
    tl.store(c_ptrs, c, mask=c_mask)


def matmul(a, b):
    assert a.shape[1] == b.shape[0] * 2, "Incompatible dimensions"
    assert a.is_contiguous(), "Matrix A must be contiguous"
    M, K = a.shape
    _, N = b.shape

    c = torch.empty((M, N), device=a.device, dtype=torch.bfloat16)
    grid = lambda META: (
        triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
    )
    matmul_kernel[grid](
        a,
        b,
        c,
        M,
        N,
        K,
        a.stride(0),
        a.stride(1),
        b.stride(0),
        b.stride(1),
        c.stride(0),
        c.stride(1),
    )
    return c


def pack_2xint4(t):
    # Packs a KxN matrix of int4-range values into a (K//2)xN int8 matrix
    # holding 2 x int4 per byte (even K rows in the low nibble, odd K rows
    # in the high nibble).
    t = t.to(torch.int8).reshape(t.shape[0] // 2, 2, t.shape[1]).permute(1, 0, 2)
    return (t[0] & 0xF) | (t[1] << 4)
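A quick way to sanity-check this kernel (again a sketch, not part of the commit; it assumes a CUDA device and that the kernel module above is importable, shown here under the hypothetical name `kernel`) is to pack a random int4-range weight, run the Triton matmul, and compare against a plain bf16 matmul of the unpacked weight. Since the kernel does not yet apply scales or zero points, the reference is simply `x @ w`:

```python
import torch

from kernel import matmul, pack_2xint4  # hypothetical import path; adjust to your checkout

M, K, N = 128, 8192, 1280
x = torch.randn(M, K, device="cuda", dtype=torch.bfloat16)
w = torch.randint(-8, 7, (K, N), device="cuda", dtype=torch.int32)

# Pack two int4 values per int8 byte; the .T.contiguous().T dance gives the
# (K // 2, N) tensor the K-minor layout described in the kernel's stride comment.
w_packed = pack_2xint4(w).T.contiguous().T

out = matmul(x, w_packed)
ref = x @ w.to(torch.bfloat16)
print((out - ref).abs().max())  # expect only bf16 rounding-level differences
```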
