[Mesa-dev] [PATCH 03/22] nir/spirv: initial handling of OpenCL.std extension opcodes
Karol Herbst
kherbst at redhat.com
Tue Nov 13 15:48:07 UTC 2018
Not complete; mostly just adding things as I encounter them in the CTS,
but I'm not getting far enough yet to hit most of the OpenCL.std
instructions. Anyway, this is better than nothing and covers the most
common builtins.
Signed-off-by: Karol Herbst <kherbst at redhat.com>
---
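For context: a trivial kernel like the one below (names made up purely for
illustration) already hits a few of the builtins lowered here, assuming the
SPIR-V producer turns them into OpenCL.std extended instructions:

   __kernel void builtins(__global float4 *out,
                          __global const float4 *a,
                          __global const float4 *b)
   {
      size_t i = get_global_id(0);
      /* cross()/clamp()/mad() correspond to the Cross, FClamp and Mad
       * entries handled in vtn_opencl.c below. */
      out[i] = cross(a[i], b[i])
             + clamp(a[i], 0.0f, 1.0f)
             + mad(a[i], b[i], (float4)(1.0f));
   }
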
src/compiler/nir/meson.build | 1 +
src/compiler/nir/nir_builtin_builder.c | 249 +++++++++++++++++++++-
src/compiler/nir/nir_builtin_builder.h | 150 ++++++++++++-
src/compiler/spirv/spirv_to_nir.c | 2 +
src/compiler/spirv/vtn_alu.c | 15 ++
src/compiler/spirv/vtn_glsl450.c | 2 +-
src/compiler/spirv/vtn_opencl.c | 284 +++++++++++++++++++++++++
src/compiler/spirv/vtn_private.h | 3 +
8 files changed, 701 insertions(+), 5 deletions(-)
create mode 100644 src/compiler/spirv/vtn_opencl.c
diff --git a/src/compiler/nir/meson.build b/src/compiler/nir/meson.build
index b0c3a7feb31..00d7f56e6eb 100644
--- a/src/compiler/nir/meson.build
+++ b/src/compiler/nir/meson.build
@@ -206,6 +206,7 @@ files_libnir = files(
'../spirv/vtn_amd.c',
'../spirv/vtn_cfg.c',
'../spirv/vtn_glsl450.c',
+ '../spirv/vtn_opencl.c',
'../spirv/vtn_private.h',
'../spirv/vtn_subgroup.c',
'../spirv/vtn_variables.c',
diff --git a/src/compiler/nir/nir_builtin_builder.c b/src/compiler/nir/nir_builtin_builder.c
index 252a7691f36..e37915e92ca 100644
--- a/src/compiler/nir/nir_builtin_builder.c
+++ b/src/compiler/nir/nir_builtin_builder.c
@@ -21,11 +21,43 @@
* IN THE SOFTWARE.
*/
+#include <math.h>
+
#include "nir.h"
#include "nir_builtin_builder.h"
nir_ssa_def*
-nir_cross(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+nir_iadd_sat(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ int64_t max;
+ switch (x->bit_size) {
+ case 64:
+ max = INT64_MAX;
+ break;
+ case 32:
+ max = INT32_MAX;
+ break;
+ case 16:
+ max = INT16_MAX;
+ break;
+ case 8:
+ max = INT8_MAX;
+ break;
+ default:
+ unreachable("invalid bit size");
+ }
+
+ nir_ssa_def *sum = nir_iadd(b, x, y);
+
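+ /* sum wraps below x only on positive overflow and above x only on
+ * negative overflow; pick between the two based on the sign of y below. */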
+ nir_ssa_def *hi = nir_bcsel(b, nir_ilt(b, sum, x),
+ nir_imm_intN_t(b, max, x->bit_size), sum);
+
+ nir_ssa_def *lo = nir_bcsel(b, nir_ilt(b, x, sum),
+ nir_imm_intN_t(b, (uint64_t)max + 1, x->bit_size), sum);
+
+ return nir_bcsel(b, nir_ige(b, y, nir_imm_intN_t(b, 1, y->bit_size)), hi, lo);
+}
+
+nir_ssa_def*
+nir_cross3(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
{
unsigned yzx[3] = { 1, 2, 0 };
unsigned zxy[3] = { 2, 0, 1 };
@@ -36,6 +68,63 @@ nir_cross(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
nir_swizzle(b, y, yzx, 3, true)));
}
+nir_ssa_def*
+nir_cross4(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ nir_ssa_def *cross = nir_cross3(b, x, y);
+
+ return nir_vec4(b,
+ nir_channel(b, cross, 0),
+ nir_channel(b, cross, 1),
+ nir_channel(b, cross, 2),
+ nir_imm_intN_t(b, 0, cross->bit_size));
+}
+
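+/* hadd(x, y) = (x + y) >> 1 computed without overflowing the intermediate
+ * sum, using the identity x + y == 2 * (x & y) + (x ^ y). */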
+static nir_ssa_def*
+nir_hadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, bool sign)
+{
+ nir_ssa_def *imm1 = nir_imm_int(b, 1);
+
+ nir_ssa_def *t0 = nir_ixor(b, x, y);
+ nir_ssa_def *t1 = nir_iand(b, x, y);
+
+ nir_ssa_def *t2;
+ if (sign)
+ t2 = nir_ishr(b, t0, imm1);
+ else
+ t2 = nir_ushr(b, t0, imm1);
+ return nir_iadd(b, t1, t2);
+}
+
+nir_ssa_def*
+nir_ihadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ return nir_hadd(b, x, y, true);
+}
+
+nir_ssa_def*
+nir_uhadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ return nir_hadd(b, x, y, false);
+}
+
+nir_ssa_def*
+nir_length(nir_builder *b, nir_ssa_def *vec)
+{
+ nir_ssa_def *finf = nir_imm_floatN_t(b, INFINITY, vec->bit_size);
+
+ nir_ssa_def *abs = nir_fabs(b, vec);
+ if (vec->num_components == 1)
+ return abs;
+
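+ /* Scale by the largest component magnitude so the dot product cannot
+ * overflow, then undo the scaling; the bcsel at the end returns maxc
+ * directly when it is infinity. */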
+ nir_ssa_def *maxc = nir_fmax(b, nir_channel(b, abs, 0), nir_channel(b, abs, 1));
+ for (int i = 2; i < vec->num_components; ++i)
+ maxc = nir_fmax(b, maxc, nir_channel(b, abs, i));
+ abs = nir_fdiv(b, abs, maxc);
+ nir_ssa_def *res = nir_fmul(b, nir_fsqrt(b, nir_fdot(b, abs, abs)), maxc);
+ return nir_bcsel(b, nir_feq(b, maxc, finf), maxc, res);
+}
+
nir_ssa_def*
nir_fast_length(nir_builder *b, nir_ssa_def *vec)
{
@@ -49,6 +138,107 @@ nir_fast_length(nir_builder *b, nir_ssa_def *vec)
}
}
+nir_ssa_def*
+nir_nextafter(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ nir_ssa_def *zero = nir_imm_intN_t(b, 0, x->bit_size);
+ nir_ssa_def *one = nir_imm_intN_t(b, 1, x->bit_size);
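+ /* integer bit pattern of -0.0 */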
+ nir_ssa_def *nzero = nir_imm_intN_t(b, 1ull << (x->bit_size - 1), x->bit_size);
+
+ nir_ssa_def *condeq = nir_feq(b, x, y);
+ nir_ssa_def *conddir = nir_flt(b, x, y);
+ nir_ssa_def *condnzero = nir_feq(b, x, nzero);
+
+ // beware of -0.0 - 1 == NaN
+ nir_ssa_def *xn =
+ nir_bcsel(b,
+ condnzero,
+ nir_imm_intN_t(b, (1ull << (x->bit_size - 1)) + 1, x->bit_size),
+ nir_isub(b, x, one));
+
+ // beware of -0.0 + 1 == -0x1p-149
+ nir_ssa_def *xp = nir_bcsel(b, condnzero, one, nir_iadd(b, x, one));
+
+ // nextafter can be implemented by just +/- 1 on the int value
+ nir_ssa_def *resp = nir_bcsel(b, conddir, xp, xn);
+ nir_ssa_def *resn = nir_bcsel(b, conddir, xn, xp);
+
+ nir_ssa_def *res = nir_bcsel(b, nir_flt(b, x, zero), resn, resp);
+
+ return nir_nan_check2(b, x, y, nir_bcsel(b, condeq, x, res));
+}
+
+nir_ssa_def*
+nir_normalize(nir_builder *b, nir_ssa_def *vec)
+{
+ nir_ssa_def *f0 = nir_imm_floatN_t(b, 0.0, vec->bit_size);
+
+ nir_ssa_def *maxc;
+ nir_ssa_def *res;
+ if (vec->num_components == 1) {
+ nir_ssa_def *f1p = nir_imm_floatN_t(b, 1.0, vec->bit_size);
+ nir_ssa_def *f1n = nir_imm_floatN_t(b, -1.0, vec->bit_size);
+
+ nir_ssa_def *cond = nir_flt(b, vec, f0);
+ res = nir_bcsel(b, cond, f1n, f1p);
+ maxc = vec;
+ } else {
+ maxc = nir_fmax(b, nir_fabs(b, nir_channel(b, vec, 0)),
+ nir_fabs(b, nir_channel(b, vec, 1)));
+ for (int i = 2; i < vec->num_components; ++i)
+ maxc = nir_fmax(b, maxc, nir_fabs(b, nir_channel(b, vec, i)));
+ nir_ssa_def *temp = nir_fdiv(b, vec, maxc);
+ res = nir_fmul(b, temp, nir_frsq(b, nir_fdot(b, temp, temp)));
+ }
+ return nir_bcsel(b, nir_feq(b, maxc, f0), vec, res);
+}
+
+static nir_ssa_def*
+nir_rhadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, bool sign)
+{
+ nir_ssa_def *imm1 = nir_imm_int(b, 1);
+
+ nir_ssa_def *t0 = nir_ixor(b, x, y);
+ nir_ssa_def *t1 = nir_ior(b, x, y);
+
+ nir_ssa_def *t2;
+ if (sign)
+ t2 = nir_ishr(b, t0, imm1);
+ else
+ t2 = nir_ushr(b, t0, imm1);
+
+ return nir_isub(b, t1, t2);
+}
+
+nir_ssa_def*
+nir_irhadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ return nir_rhadd(b, x, y, true);
+}
+
+nir_ssa_def*
+nir_urhadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ return nir_rhadd(b, x, y, false);
+}
+
+nir_ssa_def*
+nir_rotate(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
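+ /* left-rotate: (x << n) | (x >> (bit_size - n)) with n masked to the
+ * bit width */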
+ nir_ssa_def *shift_mask = nir_imm_int(b, x->bit_size - 1);
+
+ if (y->bit_size != 32)
+ y = nir_u2u32(b, y);
+
+ nir_ssa_def *lshift = nir_iand(b, y, shift_mask);
+ nir_ssa_def *rshift = nir_isub(b, nir_imm_int(b, x->bit_size), lshift);
+
+ nir_ssa_def *hi = nir_ishl(b, x, lshift);
+ nir_ssa_def *lo = nir_ushr(b, x, rshift);
+
+ return nir_ior(b, hi, lo);
+}
+
nir_ssa_def*
nir_smoothstep(nir_builder *b, nir_ssa_def *edge0, nir_ssa_def *edge1, nir_ssa_def *x)
{
@@ -63,3 +253,60 @@ nir_smoothstep(nir_builder *b, nir_ssa_def *edge0, nir_ssa_def *edge1, nir_ssa_d
/* result = t * t * (3 - 2 * t) */
return nir_fmul(b, t, nir_fmul(b, t, nir_fsub(b, f3, nir_fmul(b, f2, t))));
}
+
+nir_ssa_def*
+nir_isub_sat(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ uint64_t max = (1ull << (x->bit_size - 1)) - 1;
+
+ nir_ssa_def *diff = nir_isub(b, x, y);
+ nir_ssa_def *hi = nir_bcsel(b, nir_ilt(b, diff, x),
+ nir_imm_intN_t(b, max, x->bit_size), diff);
+ nir_ssa_def *lo = nir_bcsel(b, nir_ilt(b, x, diff),
+ nir_imm_intN_t(b, max + 1, x->bit_size), diff);
+ return nir_bcsel(b, nir_ilt(b, y, nir_imm_intN_t(b, 0, y->bit_size)), hi, lo);
+}
+
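+/* upsample(hi, lo): widen both halves to twice the bit size and return
+ * (hi << bit_size) | lo. */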
+nir_ssa_def*
+nir_iupsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo)
+{
+ nir_ssa_def *hiup;
+ nir_ssa_def *loup;
+ switch (hi->bit_size) {
+ case 32:
+ hiup = nir_i2i64(b, hi);
+ loup = nir_i2i64(b, lo);
+ break;
+ case 16:
+ hiup = nir_i2i32(b, hi);
+ loup = nir_i2i32(b, lo);
+ break;
+ case 8:
+ hiup = nir_i2i16(b, hi);
+ loup = nir_i2i16(b, lo);
+ break;
+ default:
+ unreachable("invalid bit size");
+ }
+ return nir_ior(b, nir_ishl(b, hiup, nir_imm_int(b, hi->bit_size)), loup);
+}
+
+nir_ssa_def*
+nir_uupsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo)
+{
+ nir_ssa_def *hiup;
+ nir_ssa_def *loup;
+ switch (hi->bit_size) {
+ case 32:
+ hiup = nir_u2u64(b, hi);
+ loup = nir_u2u64(b, lo);
+ break;
+ case 16:
+ hiup = nir_u2u32(b, hi);
+ loup = nir_u2u32(b, lo);
+ break;
+ case 8:
+ hiup = nir_u2u16(b, hi);
+ loup = nir_u2u16(b, lo);
+ break;
+ default:
+ unreachable("invalid bit size");
+ }
+ return nir_ior(b, nir_ishl(b, hiup, nir_imm_int(b, hi->bit_size)), loup);
+}
diff --git a/src/compiler/nir/nir_builtin_builder.h b/src/compiler/nir/nir_builtin_builder.h
index 0e5b9db462a..ee7726cae0b 100644
--- a/src/compiler/nir/nir_builtin_builder.h
+++ b/src/compiler/nir/nir_builtin_builder.h
@@ -31,10 +31,61 @@
* Definitions for functions in the C file come first.
*/
-nir_ssa_def* nir_cross(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_iadd_sat(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_cross3(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_cross4(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_length(nir_builder *b, nir_ssa_def *vec);
nir_ssa_def* nir_fast_length(nir_builder *b, nir_ssa_def *vec);
+nir_ssa_def* nir_ihadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_uhadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_nextafter(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_normalize(nir_builder *b, nir_ssa_def *vec);
+nir_ssa_def* nir_irhadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_urhadd(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_rotate(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
nir_ssa_def* nir_smoothstep(nir_builder *b, nir_ssa_def *edge0,
nir_ssa_def *edge1, nir_ssa_def *x);
+nir_ssa_def* nir_isub_sat(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
+nir_ssa_def* nir_iupsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo);
+nir_ssa_def* nir_uupsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo);
+
+static inline nir_ssa_def *
+nir_nan_check2(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *res)
+{
+ return nir_bcsel(b, nir_fne(b, x, x), x, nir_bcsel(b, nir_fne(b, y, y), y, res));
+}
+
+static inline nir_ssa_def *
+nir_iabs_diff(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ nir_ssa_def *cond = nir_ige(b, x, y);
+ nir_ssa_def *res0 = nir_isub(b, x, y);
+ nir_ssa_def *res1 = nir_isub(b, y, x);
+ return nir_bcsel(b, cond, res0, res1);
+}
+
+static inline nir_ssa_def *
+nir_uabs_diff(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ nir_ssa_def *cond = nir_uge(b, x, y);
+ nir_ssa_def *res0 = nir_isub(b, x, y);
+ nir_ssa_def *res1 = nir_isub(b, y, x);
+ return nir_bcsel(b, cond, res0, res1);
+}
+
+static inline nir_ssa_def *
+nir_uadd_sat(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ nir_ssa_def *sum = nir_iadd(b, x, y);
+ nir_ssa_def *cond = nir_ult(b, sum, x);
+ return nir_bcsel(b, cond, nir_imm_intN_t(b, -1l, x->bit_size), sum);
+}
+
+static inline nir_ssa_def *
+nir_bitselect(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *s)
+{
+ return nir_ior(b, nir_iand(b, nir_inot(b, s), x), nir_iand(b, s, y));
+}
static inline nir_ssa_def *
nir_fclamp(nir_builder *b,
@@ -57,10 +108,40 @@ nir_uclamp(nir_builder *b,
return nir_umin(b, nir_umax(b, x, min_val), max_val);
}
+static inline nir_ssa_def *
+nir_copysign(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ uint64_t masks = 1ull << (x->bit_size - 1);
+ uint64_t maskv = ~masks;
+
+ nir_ssa_def *s = nir_imm_intN_t(b, masks, x->bit_size);
+ nir_ssa_def *v = nir_imm_intN_t(b, maskv, x->bit_size);
+
+ return nir_ior(b, nir_iand(b, x, v), nir_iand(b, y, s));
+}
+
static inline nir_ssa_def *
nir_degrees(nir_builder *b, nir_ssa_def *val)
{
- return nir_fmul(b, val, nir_imm_float(b, 57.2957795131));
+ nir_ssa_def *c = nir_imm_floatN_t(b, 57.29577951308232, val->bit_size);
+ return nir_fmul(b, val, c);
+}
+
+static inline nir_ssa_def *
+nir_fdim(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ nir_ssa_def *cond = nir_flt(b, y, x);
+ nir_ssa_def *res = nir_fsub(b, x, y);
+ nir_ssa_def *zero = nir_imm_floatN_t(b, 0.0, x->bit_size);
+
+ // return NaN if either x or y is NaN, else x-y if x > y, else +0.0
+ return nir_nan_check2(b, x, y, nir_bcsel(b, cond, res, zero));
+}
+
+static inline nir_ssa_def *
+nir_distance(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ return nir_length(b, nir_fsub(b, x, y));
}
static inline nir_ssa_def *
@@ -75,10 +156,73 @@ nir_fast_normalize(nir_builder *b, nir_ssa_def *vec)
return nir_fdiv(b, vec, nir_fast_length(b, vec));
}
+static inline nir_ssa_def*
+nir_fmad(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z)
+{
+ return nir_fadd(b, nir_fmul(b, x, y), z);
+}
+
+static inline nir_ssa_def*
+nir_maxmag(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ nir_ssa_def *xabs = nir_fabs(b, x);
+ nir_ssa_def *yabs = nir_fabs(b, y);
+
+ nir_ssa_def *condy = nir_flt(b, xabs, yabs);
+ nir_ssa_def *condx = nir_flt(b, yabs, xabs);
+
+ return nir_bcsel(b, condy, y, nir_bcsel(b, condx, x, nir_fmax(b, x, y)));
+}
+
+static inline nir_ssa_def*
+nir_minmag(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ nir_ssa_def *xabs = nir_fabs(b, x);
+ nir_ssa_def *yabs = nir_fabs(b, y);
+
+ nir_ssa_def *condx = nir_flt(b, xabs, yabs);
+ nir_ssa_def *condy = nir_flt(b, yabs, xabs);
+
+ return nir_bcsel(b, condy, y, nir_bcsel(b, condx, x, nir_fmin(b, x, y)));
+}
+
+static inline nir_ssa_def*
+nir_nan(nir_builder *b, nir_ssa_def *x)
+{
+ nir_ssa_def *nan = nir_imm_floatN_t(b, NAN, x->bit_size);
+ if (x->num_components == 1)
+ return nan;
+
+ nir_ssa_def *nans[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < x->num_components; ++i)
+ nans[i] = nan;
+
+ return nir_vec(b, nans, x->num_components);
+}
+
static inline nir_ssa_def *
nir_radians(nir_builder *b, nir_ssa_def *val)
{
- return nir_fmul(b, val, nir_imm_float(b, 0.01745329251));
+ nir_ssa_def *c = nir_imm_floatN_t(b, 0.017453292519943295, val->bit_size);
+ return nir_fmul(b, val, c);
+}
+
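+/* OpenCL select(): for a scalar s any non-zero value selects y, for a
+ * vector s the MSB of each component selects. */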
+static inline nir_ssa_def *
+nir_select(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *s)
+{
+ if (s->num_components != 1) {
+ uint64_t mask = 1ull << (s->bit_size - 1);
+ s = nir_iand(b, s, nir_imm_intN_t(b, mask, s->bit_size));
+ }
+ return nir_bcsel(b, nir_ieq(b, s, nir_imm_intN_t(b, 0, s->bit_size)), x, y);
+}
+
+static inline nir_ssa_def *
+nir_usub_sat(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+ nir_ssa_def *sum = nir_isub(b, x, y);
+ nir_ssa_def *cond = nir_ult(b, x, y);
+ return nir_bcsel(b, cond, nir_imm_intN_t(b, 0, x->bit_size), sum);
}
#endif /* NIR_BUILTIN_BUILDER_H */
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 96ff09c3659..d72f07dc1f9 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -395,6 +395,8 @@ vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
} else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
&& (b->options && b->options->caps.trinary_minmax)) {
val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
+ } else if (strcmp(ext, "OpenCL.std") == 0) {
+ val->ext_handler = vtn_handle_opencl_instruction;
} else {
vtn_fail("Unsupported extension: %s", ext);
}
diff --git a/src/compiler/spirv/vtn_alu.c b/src/compiler/spirv/vtn_alu.c
index 6860e7dc090..b1492c1501a 100644
--- a/src/compiler/spirv/vtn_alu.c
+++ b/src/compiler/spirv/vtn_alu.c
@@ -683,6 +683,21 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
break;
}
+ case SpvOpSignBitSet: {
+ unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
+ if (src[0]->num_components == 1)
+ val->ssa->def =
+ nir_ushr(&b->nb, src[0], nir_imm_int(&b->nb, src_bit_size - 1));
+ else
+ val->ssa->def =
+ nir_ishr(&b->nb, src[0], nir_imm_int(&b->nb, src_bit_size - 1));
+
+ if (src_bit_size != 32)
+ val->ssa->def = nir_u2u32(&b->nb, val->ssa->def);
+
+ break;
+ }
+
default: {
bool swap;
unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
diff --git a/src/compiler/spirv/vtn_glsl450.c b/src/compiler/spirv/vtn_glsl450.c
index 06a49e48e3f..389e067712b 100644
--- a/src/compiler/spirv/vtn_glsl450.c
+++ b/src/compiler/spirv/vtn_glsl450.c
@@ -585,7 +585,7 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
return;
case GLSLstd450Cross: {
- val->ssa->def = nir_cross(nb, src[0], src[1]);
+ val->ssa->def = nir_cross3(nb, src[0], src[1]);
return;
}
diff --git a/src/compiler/spirv/vtn_opencl.c b/src/compiler/spirv/vtn_opencl.c
new file mode 100644
index 00000000000..089e6168fd8
--- /dev/null
+++ b/src/compiler/spirv/vtn_opencl.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright © 2018 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark (robdclark at gmail.com)
+ */
+
+#include <math.h>
+
+#include "nir/nir_builtin_builder.h"
+
+#include "vtn_private.h"
+#include "OpenCL.std.h"
+
+typedef nir_ssa_def *(*nir_handler)(struct vtn_builder *b, enum OpenCLstd opcode,
+ unsigned num_srcs, nir_ssa_def **srcs,
+ const struct glsl_type *dest_type);
+
+static void
+handle_instr(struct vtn_builder *b, enum OpenCLstd opcode, const uint32_t *w,
+ unsigned count, nir_handler handler)
+{
+ const struct glsl_type *dest_type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+
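+ /* OpExtInst: w[1] = result type, w[2] = result id, w[3] = extended
+ * instruction set, w[4] = instruction number, w[5..] = operands */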
+ unsigned num_srcs = count - 5;
+ nir_ssa_def *srcs[3] = { NULL };
+ vtn_assert(num_srcs <= ARRAY_SIZE(srcs));
+ for (unsigned i = 0; i < num_srcs; i++) {
+ srcs[i] = vtn_ssa_value(b, w[i + 5])->def;
+ }
+
+ nir_ssa_def *result = handler(b, opcode, num_srcs, srcs, dest_type);
+ if (result) {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = vtn_create_ssa_value(b, dest_type);
+ val->ssa->def = result;
+ } else {
+ vtn_assert(dest_type == glsl_void_type());
+ }
+}
+
+static nir_op
+nir_alu_op_for_opencl_opcode(struct vtn_builder *b, enum OpenCLstd opcode)
+{
+ switch (opcode) {
+ case Fabs: return nir_op_fabs;
+ case SAbs: return nir_op_iabs;
+ case Ceil: return nir_op_fceil;
+ case Cos: return nir_op_fcos;
+ case Exp2: return nir_op_fexp2;
+ case Log2: return nir_op_flog2;
+ case Floor: return nir_op_ffloor;
+ case Fma: return nir_op_ffma;
+ case Fmax: return nir_op_fmax;
+ case SMax: return nir_op_imax;
+ case UMax: return nir_op_umax;
+ case Fmin: return nir_op_fmin;
+ case SMin: return nir_op_imin;
+ case UMin: return nir_op_umin;
+ case Fmod: return nir_op_fmod;
+ case Mix: return nir_op_flrp;
+ case SMul_hi: return nir_op_imul_high;
+ case UMul_hi: return nir_op_umul_high;
+ case Popcount: return nir_op_bit_count;
+ case Pow: return nir_op_fpow;
+ case Remainder: return nir_op_frem;
+ case Rsqrt: return nir_op_frsq;
+ case Sign: return nir_op_fsign;
+ case Sin: return nir_op_fsin;
+ case Sqrt: return nir_op_fsqrt;
+ case Trunc: return nir_op_ftrunc;
+ /* uhm... */
+ case UAbs: return nir_op_imov;
+ default:
+ vtn_fail("No NIR equivalent");
+ }
+}
+
+static nir_ssa_def *
+handle_alu(struct vtn_builder *b, enum OpenCLstd opcode, unsigned num_srcs,
+ nir_ssa_def **srcs, const struct glsl_type *dest_type)
+{
+ return nir_build_alu(&b->nb, nir_alu_op_for_opencl_opcode(b, opcode),
+ srcs[0], srcs[1], srcs[2], NULL);
+}
+
+static nir_ssa_def *
+handle_special(struct vtn_builder *b, enum OpenCLstd opcode, unsigned num_srcs,
+ nir_ssa_def **srcs, const struct glsl_type *dest_type)
+{
+ nir_builder *nb = &b->nb;
+
+ switch (opcode) {
+ case SAbs_diff:
+ return nir_iabs_diff(nb, srcs[0], srcs[1]);
+ case UAbs_diff:
+ return nir_uabs_diff(nb, srcs[0], srcs[1]);
+ case SAdd_sat:
+ return nir_iadd_sat(nb, srcs[0], srcs[1]);
+ case UAdd_sat:
+ return nir_uadd_sat(nb, srcs[0], srcs[1]);
+ case Bitselect:
+ return nir_bitselect(nb, srcs[0], srcs[1], srcs[2]);
+ case FClamp:
+ return nir_fclamp(nb, srcs[0], srcs[1], srcs[2]);
+ case SClamp:
+ return nir_iclamp(nb, srcs[0], srcs[1], srcs[2]);
+ case UClamp:
+ return nir_uclamp(nb, srcs[0], srcs[1], srcs[2]);
+ case Copysign:
+ return nir_copysign(nb, srcs[0], srcs[1]);
+ case Cross:
+ if (glsl_get_components(dest_type) == 4)
+ return nir_cross4(nb, srcs[0], srcs[1]);
+ return nir_cross3(nb, srcs[0], srcs[1]);
+ case Degrees:
+ return nir_degrees(nb, srcs[0]);
+ case Fdim:
+ return nir_fdim(nb, srcs[0], srcs[1]);
+ case Distance:
+ return nir_distance(nb, srcs[0], srcs[1]);
+ case Fast_distance:
+ return nir_fast_distance(nb, srcs[0], srcs[1]);
+ case Fast_length:
+ return nir_fast_length(nb, srcs[0]);
+ case Fast_normalize:
+ return nir_fast_normalize(nb, srcs[0]);
+ case SHadd:
+ return nir_ihadd(nb, srcs[0], srcs[1]);
+ case UHadd:
+ return nir_uhadd(nb, srcs[0], srcs[1]);
+ case Length:
+ return nir_length(nb, srcs[0]);
+ case Mad:
+ return nir_fmad(nb, srcs[0], srcs[1], srcs[2]);
+ case Maxmag:
+ return nir_maxmag(nb, srcs[0], srcs[1]);
+ case Minmag:
+ return nir_minmag(nb, srcs[0], srcs[1]);
+ case Nan:
+ return nir_nan(nb, srcs[0]);
+ case Nextafter:
+ return nir_nextafter(nb, srcs[0], srcs[1]);
+ case Normalize:
+ return nir_normalize(nb, srcs[0]);
+ case Radians:
+ return nir_radians(nb, srcs[0]);
+ case SRhadd:
+ return nir_irhadd(nb, srcs[0], srcs[1]);
+ case URhadd:
+ return nir_urhadd(nb, srcs[0], srcs[1]);
+ case Rotate:
+ return nir_rotate(nb, srcs[0], srcs[1]);
+ case Smoothstep:
+ return nir_smoothstep(nb, srcs[0], srcs[1], srcs[2]);
+ case Select:
+ return nir_select(nb, srcs[0], srcs[1], srcs[2]);
+ case Step:
+ return nir_sge(nb, srcs[1], srcs[0]);
+ case SSub_sat:
+ return nir_isub_sat(nb, srcs[0], srcs[1]);
+ case USub_sat:
+ return nir_usub_sat(nb, srcs[0], srcs[1]);
+ case S_Upsample:
+ return nir_iupsample(nb, srcs[0], srcs[1]);
+ case U_Upsample:
+ return nir_uupsample(nb, srcs[0], srcs[1]);
+ default:
+ vtn_fail("No NIR equivalent");
+ return NULL;
+ }
+}
+
+static nir_ssa_def *
+handle_printf(struct vtn_builder *b, enum OpenCLstd opcode, unsigned num_srcs,
+ nir_ssa_def **srcs, const struct glsl_type *dest_type)
+{
+ /* hahah, yeah, right.. */
+ return nir_imm_int(&b->nb, -1);
+}
+
+bool
+vtn_handle_opencl_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (ext_opcode) {
+ case Fabs:
+ case SAbs:
+ case UAbs:
+ case Ceil:
+ case Cos:
+ case Exp2:
+ case Log2:
+ case Floor:
+ case Fma:
+ case Fmax:
+ case SMax:
+ case UMax:
+ case Fmin:
+ case SMin:
+ case UMin:
+ case Mix:
+ case Fmod:
+ case SMul_hi:
+ case UMul_hi:
+ case Popcount:
+ case Pow:
+ case Remainder:
+ case Rsqrt:
+ case Sign:
+ case Sin:
+ case Sqrt:
+ case Trunc:
+ handle_instr(b, ext_opcode, w, count, handle_alu);
+ return true;
+ case SAbs_diff:
+ case UAbs_diff:
+ case SAdd_sat:
+ case UAdd_sat:
+ case Bitselect:
+ case FClamp:
+ case SClamp:
+ case UClamp:
+ case Copysign:
+ case Cross:
+ case Degrees:
+ case Fdim:
+ case Distance:
+ case Fast_distance:
+ case Fast_length:
+ case Fast_normalize:
+ case SHadd:
+ case UHadd:
+ case Length:
+ case Mad:
+ case Maxmag:
+ case Minmag:
+ case Nan:
+ case Nextafter:
+ case Normalize:
+ case Radians:
+ case SRhadd:
+ case URhadd:
+ case Rotate:
+ case Select:
+ case Step:
+ case Smoothstep:
+ case SSub_sat:
+ case USub_sat:
+ case S_Upsample:
+ case U_Upsample:
+ handle_instr(b, ext_opcode, w, count, handle_special);
+ return true;
+ case Printf:
+ handle_instr(b, ext_opcode, w, count, handle_printf);
+ return true;
+ case Prefetch:
+ /* TODO maybe add a nir instruction for this? */
+ return true;
+ default:
+ vtn_fail("unhandled opencl opc: %u\n", ext_opcode);
+ return false;
+ }
+}
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index da7a04ce59f..643a88d1abe 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -745,6 +745,9 @@ void vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
bool vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode,
const uint32_t *words, unsigned count);
+bool vtn_handle_opencl_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+ const uint32_t *words, unsigned count);
+
struct vtn_builder* vtn_create_builder(const uint32_t *words, size_t word_count,
gl_shader_stage stage, const char *entry_point_name,
const struct spirv_to_nir_options *options);
--
2.19.1