// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/lib/xor-neon.c
 *
 * Authors: Jackie Liu <liuyun01@kylinos.cn>
 * Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
 */

#include <linux/raid/xor.h>
#include <linux/module.h>
#include <asm/neon-intrinsics.h>

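/*
 * Each xor_arm64_neon_N() below XORs N-1 source buffers into p1, 64 bytes
 * (four 128-bit NEON q-registers) per loop iteration. 'bytes' is assumed to
 * be a non-zero multiple of 64; in practice the RAID xor callers operate on
 * whole pages. The NEON register file may only be touched between
 * kernel_neon_begin() and kernel_neon_end(), so providing that protection
 * is the caller's job (see the sketch at the end of this file).
 */
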
void xor_arm64_neon_2(unsigned long bytes, unsigned long *p1,
	unsigned long *p2)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;

	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
	} while (--lines > 0);
}

void xor_arm64_neon_3(unsigned long bytes, unsigned long *p1,
	unsigned long *p2, unsigned long *p3)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;

	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* p1 ^= p3 */
		v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
	} while (--lines > 0);
}

void xor_arm64_neon_4(unsigned long bytes, unsigned long *p1,
	unsigned long *p2, unsigned long *p3, unsigned long *p4)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	uint64_t *dp4 = (uint64_t *)p4;

	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* p1 ^= p3 */
		v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));

		/* p1 ^= p4 */
		v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
		dp4 += 8;
	} while (--lines > 0);
}

void xor_arm64_neon_5(unsigned long bytes, unsigned long *p1,
	unsigned long *p2, unsigned long *p3,
	unsigned long *p4, unsigned long *p5)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	uint64_t *dp4 = (uint64_t *)p4;
	uint64_t *dp5 = (uint64_t *)p5;

	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* p1 ^= p3 */
		v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));

		/* p1 ^= p4 */
		v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));

		/* p1 ^= p5 */
		v0 = veorq_u64(v0, vld1q_u64(dp5 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp5 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp5 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp5 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
		dp4 += 8;
		dp5 += 8;
	} while (--lines > 0);
}

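/*
 * Publish the helpers as a named xor template. The generic xor code (see
 * include/linux/raid/xor.h and crypto/xor.c) benchmarks every registered
 * template at boot via xor_speed() and picks the fastest one for
 * xor_blocks().
 */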
struct xor_block_template const xor_block_inner_neon = {
	.name	= "__inner_neon__",
	.do_2	= xor_arm64_neon_2,
	.do_3	= xor_arm64_neon_3,
	.do_4	= xor_arm64_neon_4,
	.do_5	= xor_arm64_neon_5,
};
EXPORT_SYMBOL(xor_block_inner_neon);

MODULE_AUTHOR("Jackie Liu <liuyun01@kylinos.cn>");
MODULE_DESCRIPTION("ARMv8 XOR Extensions");
MODULE_LICENSE("GPL");

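/*
 * A minimal sketch of how this template is consumed, modeled on
 * arch/arm64/include/asm/xor.h (paraphrased, not verbatim). NEON registers
 * may only be used between kernel_neon_begin() and kernel_neon_end(), so
 * each do_N hook is wrapped accordingly before the template is offered to
 * the xor benchmark alongside the generic integer implementations.
 */
#ifdef CONFIG_KERNEL_MODE_NEON

#include <asm/neon.h>

extern struct xor_block_template const xor_block_inner_neon;

static void xor_neon_2(unsigned long bytes, unsigned long *p1,
		       unsigned long *p2)
{
	kernel_neon_begin();
	xor_block_inner_neon.do_2(bytes, p1, p2);
	kernel_neon_end();
}

/* xor_neon_3/4/5 wrap .do_3/.do_4/.do_5 in exactly the same way. */

static struct xor_block_template xor_block_arm64 = {
	.name	= "arm64_neon",
	.do_2	= xor_neon_2,
	/* .do_3 = xor_neon_3, etc. */
};

#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES			\
	do {					\
		xor_speed(&xor_block_8regs);	\
		xor_speed(&xor_block_32regs);	\
		if (cpu_has_neon())		\
			xor_speed(&xor_block_arm64); \
	} while (0)

#endif /* CONFIG_KERNEL_MODE_NEON */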