// Broadcast the search key x into all eight 64-bit lanes.
__m512i vx = _mm512_set1_epi64(static_cast<long long>(x));
// Load 8 consecutive 64-bit keys. _mm512_load_si512 is the aligned load:
// `base` must be 64-byte aligned (use _mm512_loadu_si512 otherwise).
__m512i vk = _mm512_load_si512(reinterpret_cast<const __m512i*>(base));
// Unsigned per-lane compare x >= key; each lane yields one bit in an 8-bit mask.
__mmask8 m = _mm512_cmp_epu64_mask(vx, vk, _MM_CMPINT_GE);
// Number of set mask bits == number of keys <= x, i.e. the rank of x
// within this 8-key node.
return static_cast<std::uint32_t>(__builtin_popcount(m));
would be replaced with: return __riscv_vcpop(__riscv_vmsgeu(__riscv_vle64_v_u64m1(base, FANOUT), x, FANOUT), FANOUT);
and you set FANOUT to __riscv_vsetvlmax_e64m1() at runtime (the keys are 64-bit elements, so use the e64 vlmax). Alternatively, if you don't want a dynamic FANOUT, you keep FANOUT=8 (or another constant) and do a strip-mining loop:
// Running count of matching keys across all strips.
size_t cnt = 0;
// Strip-mine over the 8 keys: each trip handles vl = min(remaining, vlmax)
// elements, so the loop runs ceil(8 / vlmax) times regardless of VLEN.
for (size_t vl, n = 8; n > 0; n -= vl, base += vl) {
// Ask the hardware how many e64 elements it can process this trip.
vl = __riscv_vsetvl_e64m1(n);
// Count lanes whose mask bit is set and accumulate.
// NOTE(review): vmsgeu(v, x) computes key >= x per lane, whereas the
// AVX-512 version computes x >= key — confirm the intended direction.
cnt += __riscv_vcpop(__riscv_vmsgeu(__riscv_vle64_v_u64m1(base, vl), x, vl), vl);
}
return cnt;
This will take FANOUT/vlmax iterations and the loop branch will be almost perfectly predicted. If you know FANOUT is always 8 and you'll never want to change it, you could alternatively select the optimal LMUL:
// FANOUT is fixed at 8, so pick the LMUL that makes all 8 keys fit in one
// register group. vlmax(e64, m1) == VLEN/64; the V extension mandates
// VLEN >= 128, so one of the three branches below always covers 8 elements.
// (Was __riscv_vsetvlmax_e32m1(): with e32, vl==2 implies VLEN=64, where
// e64m4 holds only 4 of the 8 required elements — e64 makes the branches
// consistent with the LMUL choices.)
size_t vl = __riscv_vsetvlmax_e64m1();
// VLEN == 128: 2 e64 elements per register -> LMUL=4 gives 8.
if (vl == 2) return __riscv_vcpop(__riscv_vmsgeu(__riscv_vle64_v_u64m4(base, 8), x, 8), 8);
// VLEN == 256: 4 e64 elements per register -> LMUL=2 gives 8.
// (Fixed typo: was __riscv_vmsge(u__riscv_vle64_v_u64m2(...) — the 'u'
// had migrated inside the parenthesis.)
if (vl == 4) return __riscv_vcpop(__riscv_vmsgeu(__riscv_vle64_v_u64m2(base, 8), x, 8), 8);
// VLEN >= 512: all 8 elements already fit at LMUL=1.
// NOTE(review): vmsgeu(v, x) computes key >= x per lane, whereas the
// AVX-512 version computes x >= key — confirm the intended direction.
return __riscv_vcpop(__riscv_vmsgeu(__riscv_vle64_v_u64m1(base, 8), x, 8), 8);