// cuElim/include/gf256/gf256_elim.cuh
// 2024-09-19 15:59:53 +08:00
// 179 lines, 6.1 KiB
#ifndef GF256_ELIM_CUH
#define GF256_ELIM_CUH
#include "gf256_mat.cuh"
namespace gf256
{
// Gaussian-eliminates one base_t-wide word column (gf256_num GF(256) byte
// lanes packed per word) held in base_col, in place, on the host.
//
//   base_col     : base_col_len words, one per still-active matrix row
//                  (NOTE(review): length parameter is typed base_t, not
//                  size_t — presumably interchangeable here; confirm)
//   st_r         : absolute row index of base_col[0] in the full matrix
//   w            : word-column index; a pivot found in byte lane L is
//                  reported as global column w * gf256_num + L
//   p_col, p_row : output pivot column / row positions, appended in rank order
//   gf           : GF(256) arithmetic helper (inv / mul_base)
//
// Returns the number of pivots found (the rank of this word column).
//
// Elimination is applied lazily: pivot[i] records the byte lane of pivot row
// i, and next[i] records the first row that has not yet had pivot row i's
// update applied. Updates are applied on demand as the row scan passes each
// row, so each row is touched once per pivot instead of sweeping the whole
// column after every pivot.
//
// NOTE(review): the third argument of gf.mul_base and the second of concat8
// appear to restrict the operation to byte lanes >= that index (the already
// reduced lower lanes are left alone) — assumed from usage here; confirm
// against their definitions in gf256_mat.cuh.
size_t cpu_elim_base(base_t *base_col, base_t base_col_len, size_t st_r, size_t w, vector<size_t> &p_col, vector<size_t> &p_row, const GF256 &gf)
{
size_t rank = 0;
// Only entries with index < rank are ever read, so leaving these
// uninitialized is safe: each is written when its pivot is found.
size_t pivot[gf256_num];
size_t next[gf256_num];
for (size_t pivot_col = 0; pivot_col < gf256_num; pivot_col++)
{
for (size_t r = rank; r < base_col_len; r++)
{
// Catch row r up on all pending pivot-row updates before testing it.
for (size_t i = 0; i < rank; i++)
{
if (next[i] == r)
{
base_col[r] ^= gf.mul_base(get8(base_col[r], pivot[i]), base_col[i], pivot[i] + 1);
next[i]++;
}
}
if (get8(base_col[r], pivot_col) != 0)
{
// Row r has a nonzero entry in the wanted byte lane: record the pivot.
p_col.push_back(w * gf256_num + pivot_col);
p_row.push_back(st_r + r);
if (r != rank)
{
// Swap the pivot row up into position `rank`.
base_t temp = base_col[rank];
base_col[rank] = base_col[r];
base_col[r] = temp;
}
// Normalise the pivot row: scale the lanes from pivot_col + 1 upward by
// the pivot byte's inverse, keeping the already-reduced lower lanes.
base_col[rank] = concat8(base_col[rank], pivot_col + 1, gf.mul_base(gf.inv(get8(base_col[rank], pivot_col)), base_col[rank], pivot_col + 1));
pivot[rank] = pivot_col;
next[rank] = rank + 1; // its update has effectively been applied to itself
rank++;
break;
}
}
}
return rank;
}
// Applies the precomputed per-word-column transform spL to the src_rank
// pivot rows starting at src, reducing them to normalized source rows.
// 1-D launch: one thread per word column; the launch grid must cover
// `width` columns. spL byte (r, i) holds the GF(256) factor applied to row i
// when updating row r (diagonal entries scale the row itself).
__global__ void gpu_mksrc_kernel(base_t *src, size_t s_rowstride, base_t *spL, size_t src_rank, size_t width)
{
    const size_t col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= width)
    {
        return;
    }

    // Stage this column's pivot-row words in registers.
    base_t row[gf256_num];
    for (size_t i = 0; i < src_rank; i++)
    {
        row[i] = *at_base(src, s_rowstride, i, col);
    }

    // Forward pass: fold in the strictly-lower spL entries, then scale each
    // row by its diagonal entry.
    for (size_t i = 0; i < src_rank; i++)
    {
        for (size_t j = 0; j < i; j++)
        {
            row[i] ^= mul_base(get8(spL[i], j), row[j]);
        }
        row[i] = mul_base(get8(spL[i], i), row[i]);
    }

    // Backward pass: clear the strictly-upper entries, bottom row upward.
    for (size_t back = 1; back < src_rank; back++)
    {
        const size_t i = src_rank - 1 - back;
        for (size_t j = i + 1; j < src_rank; j++)
        {
            row[i] ^= mul_base(get8(spL[i], j), row[j]);
        }
    }

    // Write the reduced rows back to global memory.
    for (size_t i = 0; i < src_rank; i++)
    {
        *at_base(src, s_rowstride, i, col) = row[i];
    }
}
// Eliminates the freshly found pivot byte lanes from every non-pivot row by
// XOR-ing in precomputed row multiples from table tb. 2-D launch: x covers
// word columns (width of the remaining slice), y covers all nrows rows; the
// pivot rows themselves, [st_skip, st_skip + rank), are left untouched.
//   idx[r]     : row r's word in the current pivot word column (lookup keys)
//   pivot_base : byte i holds the byte lane of pivot i within that word
__global__ void gpu_elim_kernel(base_t *idx, base_t *tb, size_t tb_rowstride, base_t *data, size_t rowstride, size_t rank, base_t pivot_base, size_t st_skip, size_t width, size_t nrows)
{
    const size_t col = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= nrows)
    {
        return;
    }
    if (row >= st_skip && row < st_skip + rank)
    {
        return; // pivot rows stay as-is
    }
    const base_t key = idx[row];
    base_t acc = base_zero;
    for (size_t i = 0; i < rank; i++)
    {
        // Byte get8(key, lane-of-pivot-i) selects which of the (1 << gf256_len)
        // precomputed multiples of source row i to accumulate.
        const size_t entry = i * (1 << gf256_len) + get8(key, get8(pivot_base, i));
        acc ^= *at_base(tb, tb_rowstride, entry, col);
    }
    *at_base(data, rowstride, row, col) ^= acc;
}
// Per-word-column elimination transform: byte (r, r) is the inverse of pivot
// r's byte, byte (r, i) the multiplier clearing pivot i's lane from row r
// (filled on the host in gpu_elim, read on the device by gpu_mksrc_kernel).
// __managed__ so host writes and kernel reads share one allocation.
__managed__ base_t spL[gf256_num];
// GPU-accelerated Gaussian elimination of this matrix over GF(256).
//
// Walks the word columns left to right; for each word column w it:
//  1. copies the not-yet-reduced rows of column w to the host and runs
//     cpu_elim_base to find up to gf256_num pivots (src_rank),
//  2. swaps the pivot rows into place and builds spL / pivot_base describing
//     how to normalise them,
//  3. on the GPU: normalises the pivot rows (gpu_mksrc_kernel), expands them
//     into a lookup table of row multiples (gpu_mktb_kernel), and eliminates
//     the pivot byte lanes from all other rows (gpu_elim_kernel).
//
// Returns {rank, pivot columns, pivot rows}. Mutates the matrix in place.
// Synchronous: each iteration ends with a device sync so the host may safely
// rewrite the __managed__ spL and re-read matrix data next iteration.
__host__ ElimResult MatGF256::gpu_elim(const GF256 &gf)
{
    gf.cpy_to_constant();
    MatGF256 tb(gf256_num * (1 << gf256_len), ncols);
    base_t *base_col;
    // FIX: allocations, launches, syncs and frees were previously unchecked,
    // inconsistent with the CUDA_CHECK usage on the cudaMemcpy2D calls below.
    CUDA_CHECK(cudaMallocManaged(&base_col, nrows * sizeof(base_t)));
    base_t *idx;
    CUDA_CHECK(cudaMallocManaged(&idx, nrows * sizeof(base_t)));
    size_t rank = 0;
    vector<size_t> p_col, p_row;
    progress::ProgressBar pb("GPU ELIMINATE", width);
    for (size_t w = 0; w < width; w++, pb.tick_display())
    {
        // Pull the still-active rows of word column w to the host.
        CUDA_CHECK(cudaMemcpy2D(base_col + rank, sizeof(base_t), at_base(rank, w), rowstride * sizeof(base_t), sizeof(base_t), nrows - rank, cudaMemcpyDefault));
        size_t src_rank = cpu_elim_base(base_col + rank, nrows - rank, rank, w, p_col, p_row, gf);
        if (src_rank == 0)
        {
            continue; // no pivot in this word column
        }
        // Move the pivot rows up into rows [rank, rank + src_rank).
        for (size_t i = 0; i < src_rank; i++)
        {
            cpu_swap_row(rank + i, p_row[rank + i]);
            spL[i] = base_zero;
        }
        base_t pivot_base = base_zero;
        for (size_t r = 0; r < src_rank; r++)
        {
            size_t loc = (p_col[rank + r] - w * gf256_num);
            // Diagonal: inverse of pivot r's byte; off-diagonal: the factors
            // needed to clear byte lane loc from the other pivot rows.
            set8(spL[r], gf.inv(get8(base_col[rank + r], loc)), r);
            for (size_t i = 0; i < r; i++)
            {
                set8(spL[i], get8(base_col[rank + i], loc), r);
            }
            for (size_t i = r + 1; i < src_rank; i++)
            {
                set8(spL[i], get8(base_col[rank + i], loc), r);
            }
            set8(pivot_base, loc, r);
        }
        dim3 block_src(THREAD_X);
        dim3 grid_src((width - w - 1) / block_src.x + 1);
        // FIX: the grid covers the width - w remaining word columns and the
        // src pointer is offset to column w, so the in-kernel bound must be
        // width - w; passing the full width let tail threads touch columns
        // past the slice. Matches the bound passed to gpu_elim_kernel below.
        gpu_mksrc_kernel<<<grid_src, block_src>>>(at_base(rank, w), rowstride, spL, src_rank, width - w);
        CUDA_CHECK(cudaGetLastError());
        CUDA_CHECK(cudaDeviceSynchronize());
        dim3 block_tb(THREAD_X, THREAD_Y);
        dim3 grid_tb((width - w - 1) / block_tb.x + 1, (src_rank * (1 << gf256_len) - 1) / block_tb.y + 1);
        gpu_mktb_kernel<<<grid_tb, block_tb>>>(tb.data, tb.rowstride, at_base(rank, w), rowstride, tb.width);
        CUDA_CHECK(cudaGetLastError());
        CUDA_CHECK(cudaDeviceSynchronize());
        // Lookup keys: every row's word in the current pivot word column.
        CUDA_CHECK(cudaMemcpy2D(idx, sizeof(base_t), at_base(0, w), rowstride * sizeof(base_t), sizeof(base_t), nrows, cudaMemcpyDefault));
        dim3 block(THREAD_X, THREAD_Y);
        dim3 grid((width - w - 1) / block.x + 1, (nrows - 1) / block.y + 1);
        gpu_elim_kernel<<<grid, block>>>(idx, tb.data, tb.rowstride, at_base(0, w), rowstride, src_rank, pivot_base, rank, width - w, nrows);
        CUDA_CHECK(cudaGetLastError());
        // Sync before the host rewrites spL / copies matrix data next round.
        CUDA_CHECK(cudaDeviceSynchronize());
        rank += src_rank;
        if (rank == nrows)
        {
            break; // full rank reached; remaining columns hold no pivots
        }
    }
    CUDA_CHECK(cudaFree(base_col));
    CUDA_CHECK(cudaFree(idx));
    return {rank, p_col, p_row};
}
}
#endif