[AArch64][10/10] ARMv8.2-A FP16 lane scalar intrinsics

gcc/
	* config/aarch64/arm_neon.h (vfmah_lane_f16, vfmah_laneq_f16,
	vfmsh_lane_f16, vfmsh_laneq_f16, vmulh_lane_f16, vmulh_laneq_f16,
	vmulxh_lane_f16, vmulxh_laneq_f16): New.

From-SVN: r238725
This commit is contained in:
Jiong Wang 2016-07-25 16:15:34 +00:00 committed by Jiong Wang
parent 9a594ad6ef
commit bb6131dbd1
2 changed files with 58 additions and 0 deletions

View File

@@ -1,3 +1,9 @@
2016-07-25 Jiong Wang <jiong.wang@arm.com>
* config/aarch64/arm_neon.h (vfmah_lane_f16, vfmah_laneq_f16,
vfmsh_lane_f16, vfmsh_laneq_f16, vmulh_lane_f16, vmulh_laneq_f16,
vmulxh_lane_f16, vmulxh_laneq_f16): New.
2016-07-25 Jiong Wang <jiong.wang@arm.com>
* config/aarch64/aarch64-simd-builtins.def: Register new builtins.

View File

@@ -26777,6 +26777,20 @@ vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
/* ARMv8.2-A FP16 lane vector intrinsics. */
/* Scalar FP16 fused multiply-add against one lane of a 64-bit vector:
   selects lane __lane of __c and forwards to the scalar vfmah_f16.  */
__extension__ static __inline float16_t __attribute__ ((__always_inline__))
vfmah_lane_f16 (float16_t __a, float16_t __b,
		float16x4_t __c, const int __lane)
{
  float16_t __c_lane = __aarch64_vget_lane_any (__c, __lane);
  return vfmah_f16 (__a, __b, __c_lane);
}
/* Scalar FP16 fused multiply-add against one lane of a 128-bit vector:
   selects lane __lane of __c and forwards to the scalar vfmah_f16.  */
__extension__ static __inline float16_t __attribute__ ((__always_inline__))
vfmah_laneq_f16 (float16_t __a, float16_t __b,
		 float16x8_t __c, const int __lane)
{
  float16_t __c_lane = __aarch64_vget_lane_any (__c, __lane);
  return vfmah_f16 (__a, __b, __c_lane);
}
__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
vfma_lane_f16 (float16x4_t __a, float16x4_t __b,
float16x4_t __c, const int __lane)
@@ -26817,6 +26831,20 @@ vfmaq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
return vfmaq_f16 (__a, __b, vdupq_n_f16 (__c));
}
/* Scalar FP16 fused multiply-subtract against one lane of a 64-bit
   vector: selects lane __lane of __c and forwards to the scalar
   vfmsh_f16.  */
__extension__ static __inline float16_t __attribute__ ((__always_inline__))
vfmsh_lane_f16 (float16_t __a, float16_t __b,
		float16x4_t __c, const int __lane)
{
  float16_t __c_lane = __aarch64_vget_lane_any (__c, __lane);
  return vfmsh_f16 (__a, __b, __c_lane);
}
/* Scalar FP16 fused multiply-subtract against one lane of a 128-bit
   vector: selects lane __lane of __c and forwards to the scalar
   vfmsh_f16.  */
__extension__ static __inline float16_t __attribute__ ((__always_inline__))
vfmsh_laneq_f16 (float16_t __a, float16_t __b,
		 float16x8_t __c, const int __lane)
{
  float16_t __c_lane = __aarch64_vget_lane_any (__c, __lane);
  return vfmsh_f16 (__a, __b, __c_lane);
}
__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
vfms_lane_f16 (float16x4_t __a, float16x4_t __b,
float16x4_t __c, const int __lane)
@@ -26857,6 +26885,12 @@ vfmsq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
return vfmsq_f16 (__a, __b, vdupq_n_f16 (__c));
}
/* Scalar FP16 multiply by one lane of a 64-bit vector: extracts lane
   __lane of __b and multiplies __a by it.  */
__extension__ static __inline float16_t __attribute__ ((__always_inline__))
vmulh_lane_f16 (float16_t __a, float16x4_t __b, const int __lane)
{
  float16_t __b_lane = __aarch64_vget_lane_any (__b, __lane);
  return __a * __b_lane;
}
__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
vmul_lane_f16 (float16x4_t __a, float16x4_t __b, const int __lane)
{
@@ -26869,6 +26903,12 @@ vmulq_lane_f16 (float16x8_t __a, float16x4_t __b, const int __lane)
return vmulq_f16 (__a, vdupq_n_f16 (__aarch64_vget_lane_any (__b, __lane)));
}
/* Scalar FP16 multiply by one lane of a 128-bit vector: extracts lane
   __lane of __b and multiplies __a by it.  */
__extension__ static __inline float16_t __attribute__ ((__always_inline__))
vmulh_laneq_f16 (float16_t __a, float16x8_t __b, const int __lane)
{
  float16_t __b_lane = __aarch64_vget_lane_any (__b, __lane);
  return __a * __b_lane;
}
__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
vmul_laneq_f16 (float16x4_t __a, float16x8_t __b, const int __lane)
{
@@ -26893,6 +26933,12 @@ vmulq_n_f16 (float16x8_t __a, float16_t __b)
return vmulq_laneq_f16 (__a, vdupq_n_f16 (__b), 0);
}
/* Scalar FP16 multiply-extended (FMULX) by one lane of a 64-bit
   vector: selects lane __lane of __b and forwards to the scalar
   vmulxh_f16.  */
__extension__ static __inline float16_t __attribute__ ((__always_inline__))
vmulxh_lane_f16 (float16_t __a, float16x4_t __b, const int __lane)
{
  float16_t __b_lane = __aarch64_vget_lane_any (__b, __lane);
  return vmulxh_f16 (__a, __b_lane);
}
__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
vmulx_lane_f16 (float16x4_t __a, float16x4_t __b, const int __lane)
{
@@ -26905,6 +26951,12 @@ vmulxq_lane_f16 (float16x8_t __a, float16x4_t __b, const int __lane)
return vmulxq_f16 (__a, __aarch64_vdupq_lane_f16 (__b, __lane));
}
/* Scalar FP16 multiply-extended (FMULX) by one lane of a 128-bit
   vector: selects lane __lane of __b and forwards to the scalar
   vmulxh_f16.  */
__extension__ static __inline float16_t __attribute__ ((__always_inline__))
vmulxh_laneq_f16 (float16_t __a, float16x8_t __b, const int __lane)
{
  float16_t __b_lane = __aarch64_vget_lane_any (__b, __lane);
  return vmulxh_f16 (__a, __b_lane);
}
__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
vmulx_laneq_f16 (float16x4_t __a, float16x8_t __b, const int __lane)
{