// Aurora/src/Function1D.cu
#include "CudaMatrix.h"
#include "AuroraDefs.h"
#include "Function1D.cuh"
#include "Function1D.h"
#include "Matrix.h"
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/complex.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
using namespace Aurora;
using namespace thrust::placeholders;
namespace
{
const int THREADS_PER_BLOCK = 256;
}
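// complex(): promote a real matrix to interleaved (re, im) complex storage.
// Assumes Aurora::Complex enumerates to 2 and Aurora::Normal to 1 (floats per
// element), as implied by the allocation-size arithmetic used throughout this file.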
__global__ void complexKernel(float* aInputData, float* aOutput, unsigned int aSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aSize)
{
aOutput[2*idx] = aInputData[idx];
aOutput[2*idx + 1] = 0;
}
}
CudaMatrix Aurora::complex(const CudaMatrix& aMatrix)
{
if(aMatrix.isComplex())
{
return CudaMatrix();
}
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * aMatrix.getDataSize() * Aurora::Complex);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
complexKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), Aurora::Complex);
}
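// real(): extract the real components of an interleaved complex matrix.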
__global__ void realKernel(float* aInputData, float* aOutput, unsigned int aSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aSize)
{
aOutput[idx] = aInputData[idx*2];
}
}
CudaMatrix Aurora::real(const CudaMatrix& aMatrix)
{
if(!aMatrix.isComplex())
{
return CudaMatrix();
}
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * aMatrix.getDataSize());
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
realKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), Aurora::Normal);
}
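// imag(): extract the imaginary components of an interleaved complex matrix.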
__global__ void imagKernel(float* aInputData, float* aOutput, unsigned int aSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aSize)
{
aOutput[idx] = aInputData[idx*2 + 1];
}
}
CudaMatrix Aurora::imag(const CudaMatrix& aMatrix)
{
if(!aMatrix.isComplex())
{
return CudaMatrix();
}
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * aMatrix.getDataSize());
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
imagKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), Aurora::Normal);
}
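// ceil()/round()/floor() below share one pattern: allocate an output buffer,
// launch a 1-D elementwise kernel, synchronize, then wrap the raw pointer in a
// CudaMatrix. For complex inputs the kernels run over both interleaved
// components, so real and imaginary parts are rounded independently.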
__global__ void ceilKernel(float* aInputData, float* aOutput, unsigned int aSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aSize)
{
aOutput[idx] = ceilf(aInputData[idx]);
}
}
CudaMatrix Aurora::ceil(const CudaMatrix& aMatrix)
{
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
ceilKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
CudaMatrix Aurora::ceil(const CudaMatrix&& aMatrix)
{
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
ceilKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
__global__ void roundKernel(float* aInputData, float* aOutput, unsigned int aSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aSize)
{
aOutput[idx] = roundf(aInputData[idx]);
}
}
CudaMatrix Aurora::round(const CudaMatrix& aMatrix)
{
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
roundKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
CudaMatrix Aurora::round(const CudaMatrix&& aMatrix)
{
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
roundKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
__global__ void floorKernel(float* aInputData, float* aOutput, unsigned int aSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aSize)
{
aOutput[idx] = floorf(aInputData[idx]);
}
}
CudaMatrix Aurora::floor(const CudaMatrix& aMatrix)
{
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
floorKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
CudaMatrix Aurora::floor(const CudaMatrix&& aMatrix)
{
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
floorKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
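// sqrt(): elementwise square root; complex inputs are rejected below.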
__global__ void sqrtKernel(float* aInputData, float* aOutput, unsigned int aSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aSize)
{
aOutput[idx] = sqrtf(aInputData[idx]);
}
}
CudaMatrix Aurora::sqrt(const CudaMatrix& aMatrix)
{
if(aMatrix.getValueType() == Aurora::Complex)
{
std::cerr<<"sqrt not support complex"<<std::endl;
return CudaMatrix();
}
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
sqrtKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
CudaMatrix Aurora::sqrt(const CudaMatrix&& aMatrix)
{
if(aMatrix.getValueType() == Aurora::Complex)
{
std::cerr<<"sqrt not support complex"<<std::endl;
return CudaMatrix();
}
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
sqrtKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
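// abs(): elementwise magnitude. For complex inputs this computes sqrt(re^2 + im^2),
// so the result is always a real (Aurora::Normal) matrix of the same shape.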
__global__ void absKernel(float* aInputData, float* aOutput, unsigned int aSize, bool aIsComplex)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aSize)
{
if(aIsComplex)
{
aOutput[idx] = sqrtf(aInputData[2*idx] * aInputData[2*idx] + aInputData[2*idx+1] * aInputData[2*idx+1]);
}
else
{
aOutput[idx] = fabsf(aInputData[idx]);
}
}
}
CudaMatrix Aurora::abs(const CudaMatrix& aMatrix)
{
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
absKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size, aMatrix.isComplex());
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2));
}
CudaMatrix Aurora::abs(const CudaMatrix&& aMatrix)
{
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
absKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size, aMatrix.isComplex());
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2));
}
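// sign(): MATLAB-style signum. For complex inputs it returns z / |z|
// (with sign(0 + 0i) defined as 0); for real inputs it returns -1, 0, or 1.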
__global__ void signKernel(float* aInputData, float* aOutput, unsigned int aSize, bool aIsComplex)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aSize)
{
if(aIsComplex)
{
float absValue = sqrtf(aInputData[2*idx] * aInputData[2*idx] + aInputData[2*idx + 1] * aInputData[2*idx + 1]);
if(absValue == 0.0f)
{
// sign(0 + 0i) is defined as 0, so avoid dividing by zero
aOutput[2*idx] = 0.0f;
aOutput[2*idx + 1] = 0.0f;
return;
}
aOutput[2*idx] = aInputData[2*idx] / absValue;
aOutput[2*idx + 1] = aInputData[2*idx + 1] / absValue;
return;
}
if(aInputData[idx] < 0)
{
aOutput[idx] = -1;
}
else if(aInputData[idx] > 0)
{
aOutput[idx] = 1;
}
else
{
aOutput[idx] = 0;
}
}
}
CudaMatrix Aurora::sign(const CudaMatrix& aMatrix)
{
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
signKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, aMatrix.getDataSize(), aMatrix.isComplex());
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
CudaMatrix Aurora::sign(const CudaMatrix&& aMatrix)
{
size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
signKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, aMatrix.getDataSize(), aMatrix.isComplex());
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
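// compareSet() overloads: elementwise conditional assignment via thrust::transform
// over the raw device pointers. CompareOp spells out the test:
// GT (>), NG (<=), EQ (==), NE (!=), NL (>=), LT (<).
// A minimal usage sketch, assuming CompareOp lives in the Aurora namespace and
// m is a real-valued CudaMatrix:
//   Aurora::compareSet(m, 0.0f, 0.0f, Aurora::LT); // clamp negatives to zero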
void Aurora::compareSet(CudaMatrix& aValueMatrix, float compareValue, float newValue, CompareOp op)
{
switch (op)
{
case GT:
{
auto lambda = [=] __host__ __device__ (const float& x){
return x>compareValue?newValue:x;
};
thrust::transform(thrust::device,aValueMatrix.getData(),aValueMatrix.getData()+aValueMatrix.getDataSize(),aValueMatrix.getData(),lambda);
break;
}
case NG:{
auto lambda = [=] __host__ __device__ (const float& x){
return x<=compareValue?newValue:x;
};
thrust::transform(thrust::device,aValueMatrix.getData(),aValueMatrix.getData()+aValueMatrix.getDataSize(),aValueMatrix.getData(),lambda);
break;
}
case EQ:{
auto lambda = [=] __host__ __device__ (const float& x){
return x==compareValue?newValue:x;
};
thrust::transform(thrust::device,aValueMatrix.getData(),aValueMatrix.getData()+aValueMatrix.getDataSize(),aValueMatrix.getData(),lambda);
break;
}
case NE:{
auto lambda = [=] __host__ __device__ (const float& x){
return x!=compareValue?newValue:x;
};
thrust::transform(thrust::device,aValueMatrix.getData(),aValueMatrix.getData()+aValueMatrix.getDataSize(),aValueMatrix.getData(),lambda);
break;
}
case NL:{
auto lambda = [=] __host__ __device__ (const float& x){
return x>=compareValue?newValue:x;
};
thrust::transform(thrust::device,aValueMatrix.getData(),aValueMatrix.getData()+aValueMatrix.getDataSize(),aValueMatrix.getData(),lambda);
break;
}
case LT:{
auto lambda = [=] __host__ __device__ (const float& x){
return x<compareValue?newValue:x;
};
thrust::transform(thrust::device,aValueMatrix.getData(),aValueMatrix.getData()+aValueMatrix.getDataSize(),aValueMatrix.getData(),lambda);
break;
}
default:
break;
}
}
void Aurora::compareSet(CudaMatrix& aValueMatrix, CudaMatrix& aCompareMatrix, float compareValue, float newValue, CompareOp op)
{
switch (op)
{
case GT:
{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x>compareValue?newValue:y;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aValueMatrix.getDataSize(),
aValueMatrix.getData(), aValueMatrix.getData(),
lambda);
break;
}
case NG:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x<=compareValue?newValue:y;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aValueMatrix.getDataSize(),
aValueMatrix.getData(), aValueMatrix.getData(),
lambda);
break;
}
case EQ:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x==compareValue?newValue:y;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aValueMatrix.getDataSize(),
aValueMatrix.getData(), aValueMatrix.getData(),
lambda);
break;
}
case NE:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x!=compareValue?newValue:y;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aValueMatrix.getDataSize(),
aValueMatrix.getData(), aValueMatrix.getData(),
lambda);
break;
}
case NL:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x>=compareValue?newValue:y;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aValueMatrix.getDataSize(),
aValueMatrix.getData(), aValueMatrix.getData(),
lambda);
break;
}
case LT:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x<compareValue?newValue:y;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aValueMatrix.getDataSize(),
aValueMatrix.getData(), aValueMatrix.getData(),
lambda);
break;
}
default:
break;
}
}
void Aurora::compareSet(CudaMatrix& aDesAndCompareMatrix, CudaMatrix& aOtherCompareMatrix, float newValue, CompareOp op)
{
switch (op)
{
case GT:
{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x>y?newValue:x;
};
thrust::transform(thrust::device,aDesAndCompareMatrix.getData(),
aDesAndCompareMatrix.getData()+aDesAndCompareMatrix.getDataSize(),
aOtherCompareMatrix.getData(), aDesAndCompareMatrix.getData(),
lambda);
break;
}
case NG:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x<=y?newValue:x;
};
thrust::transform(thrust::device,aDesAndCompareMatrix.getData(),
aDesAndCompareMatrix.getData()+aDesAndCompareMatrix.getDataSize(),
aOtherCompareMatrix.getData(), aDesAndCompareMatrix.getData(),
lambda);
break;
}
case EQ:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x==y?newValue:x;
};
thrust::transform(thrust::device,aDesAndCompareMatrix.getData(),
aDesAndCompareMatrix.getData()+aDesAndCompareMatrix.getDataSize(),
aOtherCompareMatrix.getData(), aDesAndCompareMatrix.getData(),
lambda);
break;
}
case NE:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x!=y?newValue:x;
};
thrust::transform(thrust::device,aDesAndCompareMatrix.getData(),
aDesAndCompareMatrix.getData()+aDesAndCompareMatrix.getDataSize(),
aOtherCompareMatrix.getData(), aDesAndCompareMatrix.getData(),
lambda);
break;
}
case NL:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x>=y?newValue:x;
};
thrust::transform(thrust::device,aDesAndCompareMatrix.getData(),
aDesAndCompareMatrix.getData()+aDesAndCompareMatrix.getDataSize(),
aOtherCompareMatrix.getData(), aDesAndCompareMatrix.getData(),
lambda);
break;
}
case LT:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x<y?newValue:x;
};
thrust::transform(thrust::device,aDesAndCompareMatrix.getData(),
aDesAndCompareMatrix.getData()+aDesAndCompareMatrix.getDataSize(),
aOtherCompareMatrix.getData(), aDesAndCompareMatrix.getData(),
lambda);
break;
}
default:
break;
}
}
void Aurora::compareSet(CudaMatrix& aCompareMatrix, float compareValue, CudaMatrix& aNewValueMatrix, CompareOp op)
{
switch (op)
{
case GT:
{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x>compareValue?y:x;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aCompareMatrix.getDataSize(),
aNewValueMatrix.getData(), aCompareMatrix.getData(),
lambda);
break;
}
case NG:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x<=compareValue?y:x;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aCompareMatrix.getDataSize(),
aNewValueMatrix.getData(), aCompareMatrix.getData(),
lambda);
break;
}
case EQ:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x==compareValue?y:x;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aCompareMatrix.getDataSize(),
aNewValueMatrix.getData(), aCompareMatrix.getData(),
lambda);
break;
}
case NE:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x!=compareValue?y:x;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aCompareMatrix.getDataSize(),
aNewValueMatrix.getData(), aCompareMatrix.getData(),
lambda);
break;
}
case NL:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x>=compareValue?y:x;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aCompareMatrix.getDataSize(),
aNewValueMatrix.getData(), aCompareMatrix.getData(),
lambda);
break;
}
case LT:{
auto lambda = [=] __host__ __device__ (const float& x, const float& y){
return x<compareValue?y:x;
};
thrust::transform(thrust::device,aCompareMatrix.getData(),
aCompareMatrix.getData()+aCompareMatrix.getDataSize(),
aNewValueMatrix.getData(), aCompareMatrix.getData(),
lambda);
break;
}
default:
break;
}
}
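// repmat(): tile a matrix by launching one block per tile with one thread per
// source element. This mapping only works while the source matrix fits in a
// single block (at most 1024 threads), so larger inputs would need a
// grid-stride rewrite.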
__global__ void repMatKernel(float* aInputData, float* aOutput, unsigned int aInputSize, bool aIsComplex)
{
unsigned int idX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int idZ = blockIdx.z * blockDim.z + threadIdx.z;
if(aIsComplex)
{
unsigned int outPutIndex = 2 * (idZ * blockDim.x * blockDim.y * gridDim.x * gridDim.y + idY * blockDim.x * gridDim.x + idX);
unsigned int inPutIndex = 2 * (threadIdx.y * blockDim.x + threadIdx.x);
aOutput[outPutIndex] = aInputData[inPutIndex];
aOutput[outPutIndex + 1] = aInputData[inPutIndex + 1];
}
else
{
aOutput[idZ * blockDim.x * blockDim.y * gridDim.x * gridDim.y + idY * blockDim.x * gridDim.x + idX] = aInputData[threadIdx.y * blockDim.x + threadIdx.x];
}
}
CudaMatrix Aurora::repmat(const CudaMatrix& aMatrix,int aRowTimes, int aColumnTimes)
{
if(aRowTimes < 1 || aColumnTimes < 1 || aMatrix.getDims() > 2 || aMatrix.isNull())
{
return CudaMatrix();
}
dim3 blockSize(aMatrix.getDimSize(0), aMatrix.getDimSize(1), 1);
dim3 gridSize(aRowTimes, aColumnTimes, 1);
size_t size = aMatrix.getDataSize() * aMatrix.getValueType() * aRowTimes * aColumnTimes;
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
repMatKernel<<<gridSize, blockSize>>>(aMatrix.getData(), data, aMatrix.getDataSize(), aMatrix.isComplex());
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0) * aRowTimes, aMatrix.getDimSize(1) * aColumnTimes, aMatrix.getDimSize(2), aMatrix.getValueType());
}
CudaMatrix Aurora::repmat(const CudaMatrix& aMatrix,int aRowTimes, int aColumnTimes, int aSliceTimes)
{
if(aRowTimes < 1 || aColumnTimes < 1 || aSliceTimes < 1 || aMatrix.getDims() > 2 || aMatrix.isNull())
{
return CudaMatrix();
}
dim3 blockSize(aMatrix.getDimSize(0), aMatrix.getDimSize(1), 1);
dim3 gridSize(aRowTimes, aColumnTimes, aSliceTimes);
size_t size = aMatrix.getDataSize() * aMatrix.getValueType() * aRowTimes * aColumnTimes * aSliceTimes;
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
repMatKernel<<<gridSize, blockSize>>>(aMatrix.getData(), data, aMatrix.getDataSize(), aMatrix.isComplex());
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0) * aRowTimes, aMatrix.getDimSize(1) * aColumnTimes, aMatrix.getDimSize(2) * aSliceTimes, aMatrix.getValueType());
}
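// repmat3d() uses the same one-block-per-tile mapping in three dimensions;
// note that CUDA additionally caps blockDim.z at 64.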
__global__ void repMat3DKernel(float* aInputData, float* aOutput, unsigned int aInputSize, bool aIsComplex)
{
unsigned int idX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int idZ = blockIdx.z * blockDim.z + threadIdx.z;
if(aIsComplex)
{
unsigned int outPutIndex = 2 * (idZ * blockDim.x * blockDim.y * gridDim.x * gridDim.y + idY * blockDim.x * gridDim.x + idX);
unsigned int inPutIndex = 2 * (threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x);
aOutput[outPutIndex] = aInputData[inPutIndex];
aOutput[outPutIndex + 1] = aInputData[inPutIndex + 1];
}
else
{
aOutput[idZ * blockDim.x * blockDim.y * gridDim.x * gridDim.y + idY * blockDim.x * gridDim.x + idX] = aInputData[threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x];
}
}
CudaMatrix Aurora::repmat3d(const CudaMatrix& aMatrix,int aRowTimes, int aColumnTimes, int aSliceTimes)
{
if(aRowTimes < 1 || aColumnTimes < 1 || aSliceTimes < 1 || aMatrix.getDims() < 3 || aMatrix.isNull())
{
return CudaMatrix();
}
dim3 blockSize(aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2));
dim3 gridSize(aRowTimes, aColumnTimes, aSliceTimes);
size_t size = aMatrix.getDataSize() * aMatrix.getValueType() * aRowTimes * aColumnTimes * aSliceTimes;
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
repMat3DKernel<<<gridSize, blockSize>>>(aMatrix.getData(), data, aMatrix.getDataSize(), aMatrix.isComplex());
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0) * aRowTimes, aMatrix.getDimSize(1) * aColumnTimes, aMatrix.getDimSize(2) * aSliceTimes, aMatrix.getValueType());
}
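// log(): natural logarithm by default; aBaseNum == -1 is the sentinel for base e,
// otherwise the change-of-base identity log_b(x) = ln(x) / ln(b) is applied.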
__global__ void logKernel(float* aInputData, float* aOutput, unsigned int aInputSize, int aBaseNum)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aInputSize)
{
if(aBaseNum == -1)
{
aOutput[idx] = logf(aInputData[idx]);
}
else
{
float value = logf(aBaseNum);
aOutput[idx] = logf(aInputData[idx]) / value;
}
}
}
CudaMatrix Aurora::log(const CudaMatrix& aMatrix, int aBaseNum)
{
if(aMatrix.getValueType() == Aurora::Complex)
{
std::cerr<<"log not support complex"<<std::endl;
return CudaMatrix();
}
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
logKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size, aBaseNum);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
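// exp(): for complex inputs this applies Euler's formula,
// exp(a + bi) = e^a * (cos(b) + i*sin(b)).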
__global__ void expKernel(float* aInputData, float* aOutput, unsigned int aInputSize, bool aIsComplex)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aInputSize)
{
if(aIsComplex)
{
unsigned int index = 2 * idx;
float expReal = expf(aInputData[index]);
aOutput[index] = expReal * cosf(aInputData[index + 1]);
aOutput[index + 1] = expReal * sinf(aInputData[index + 1]);
}
else
{
aOutput[idx] = expf(aInputData[idx]);
}
}
}
CudaMatrix Aurora::exp(const CudaMatrix& aMatrix)
{
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size * aMatrix.getValueType());
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
expKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size, aMatrix.isComplex());
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
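// mod(): elementwise fmodf against a scalar. The result keeps the sign of the
// dividend (C semantics), which differs from MATLAB's mod() for negative inputs.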
__global__ void modKernel(float* aInputData, float* aOutput, unsigned int aInputSize, float aModValue)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aInputSize)
{
aOutput[idx] = fmodf(aInputData[idx], aModValue);
}
}
CudaMatrix Aurora::mod(const CudaMatrix& aMatrix, float aValue)
{
if(aMatrix.isComplex())
{
std::cerr<<"mod not support complex"<<std::endl;
return CudaMatrix();
}
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
modKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size, aValue);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2));
}
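// acos(): elementwise arc cosine, in radians.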
__global__ void acosKernel(float* aInputData, float* aOutput, unsigned int aInputSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aInputSize)
{
aOutput[idx] = acosf(aInputData[idx]);
}
}
CudaMatrix Aurora::acos(const CudaMatrix& aMatrix)
{
if(aMatrix.isComplex())
{
std::cerr<<"acos not support complex"<<std::endl;
return CudaMatrix();
}
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
acosKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2));
}
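// acosd(): elementwise arc cosine, converted to degrees.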
__global__ void acosdKernel(float* aInputData, float* aOutput, unsigned int aInputSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aInputSize)
{
aOutput[idx] = acosf(aInputData[idx]) * 180 / PI;
}
}
CudaMatrix Aurora::acosd(const CudaMatrix& aMatrix)
{
if(aMatrix.isComplex())
{
std::cerr<<"acos not support complex"<<std::endl;
return CudaMatrix();
}
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size);
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
acosdKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2));
}
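// conj(): complex conjugate; real inputs are returned unchanged as a deep copy.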
__global__ void conjKernel(float* aInputData, float* aOutput, unsigned int aInputSize)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < aInputSize)
{
unsigned int index = idx * 2;
aOutput[index] = aInputData[index];
aOutput[index + 1] = -aInputData[index + 1];
}
}
CudaMatrix Aurora::conj(const CudaMatrix& aMatrix)
{
if(!aMatrix.isComplex())
{
return CudaMatrix::copyFromRawData(aMatrix.getData(),aMatrix.getDimSize(0),aMatrix.getDimSize(1),aMatrix.getDimSize(2));
}
size_t size = aMatrix.getDataSize();
float* data = nullptr;
cudaMalloc((void**)&data, sizeof(float) * size * aMatrix.getValueType());
int blocksPerGrid = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
conjKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(aMatrix.getData(), data, size);
cudaDeviceSynchronize();
return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1), aMatrix.getDimSize(2), aMatrix.getValueType());
}
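// norm(): for vectors, Norm1 sums element magnitudes and the default returns the
// 2-norm (for complex data the interleaved squares sum to |z|^2, so the same
// reduction is correct). For 2-D matrices, Norm1 is the maximum absolute column
// sum, NormF is the Frobenius norm, and the default 2-norm is the largest
// singular value from cuSOLVER's gesvd; note Sgesvd requires m >= n.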
float Aurora::norm(const CudaMatrix& aMatrix, NormMethod aNormMethod)
{
float resultValue = 0;
if(aMatrix.getDims() > 2)
{
std::cerr<<"norm not support 3d matrix"<<std::endl;
return 0;
}
// 1-D (vector) case
if(aMatrix.getDimSize(0) == 1 || aMatrix.getDimSize(1) == 1 )
{
if(aNormMethod == Aurora::Norm1)
{
CudaMatrix result = abs(aMatrix);
resultValue = thrust::reduce(thrust::device, result.getData(), result.getData() + result.getDataSize(), 0.0, thrust::plus<float>());
return resultValue;
}
else
{
CudaMatrix result = aMatrix.deepCopy();
thrust::transform(thrust::device, result.getData(), result.getData() + result.getDataSize() * result.getValueType(), result.getData(), thrust::square<float>());
resultValue = thrust::reduce(thrust::device, result.getData(), result.getData() + result.getDataSize() * result.getValueType(), 0.0, thrust::plus<float>());
return std::sqrt(resultValue);
}
}
// 2-D case
if(aNormMethod == Aurora::NormF)
{
CudaMatrix result = aMatrix.deepCopy();
thrust::transform(thrust::device, result.getData(), result.getData() + result.getDataSize() * result.getValueType(), result.getData(), thrust::square<float>());
resultValue = thrust::reduce(thrust::device, result.getData(), result.getData() + result.getDataSize() * result.getValueType(), 0.0, thrust::plus<float>());
return std::sqrt(resultValue);
}
else if(aNormMethod == Aurora::Norm1)
{
for(int i=0; i<aMatrix.getDimSize(1); ++i)
{
CudaMatrix result = abs(aMatrix.block(1, i, i));
float tempValue = thrust::reduce(thrust::device, result.getData(), result.getData() + result.getDataSize(), 0.0, thrust::plus<float>());
if(resultValue < tempValue)
{
resultValue = tempValue;
}
}
return resultValue;
}
else
{
if(aMatrix.isComplex())
{
std::cerr<<"norm2 not support 2d complex matrix"<<std::endl;
return 0;
}
cusolverDnHandle_t cusolverH = NULL;
cudaStream_t stream = NULL;
float* d_S = NULL;
float* d_U = NULL;
float* d_VT = NULL;
int* devInfo = NULL;
float* d_work = NULL;
int lwork = 0;
const int m = aMatrix.getDimSize(0);
const int n = aMatrix.getDimSize(1);
const int lda = m;
const int ldu = m;
const int ldvt = n;
cusolverDnCreate(&cusolverH);
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
cusolverDnSetStream(cusolverH, stream);
cudaMalloc((void**)&d_S, sizeof(float)*n);
cudaMalloc((void**)&d_U, sizeof(float)*ldu*m);
cudaMalloc((void**)&d_VT, sizeof(float)*ldvt*n);
cudaMalloc((void**)&devInfo, sizeof(int));
cusolverDnSgesvd_bufferSize(cusolverH, m, n, &lwork);
cudaMalloc((void**)&d_work, sizeof(float)*lwork);
auto matrix = aMatrix.deepCopy();
cusolverDnSgesvd(cusolverH, 'A', 'A', m, n, matrix.getData(), lda, d_S,d_U, ldu, d_VT, ldvt, d_work, lwork, NULL, devInfo);
int devInfo_h = 0;
cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
if (devInfo_h != 0)
{
printf("Unsuccessful SVD execution\n");
printf("Error code: %d\n", devInfo_h);
}
std::vector<float> S(n, 0.0f);
cudaMemcpy(S.data(), d_S, sizeof(float)*n, cudaMemcpyDeviceToHost);
resultValue = S[0];
for (int i = 1; i < n; i++)
{
if (S[i] > resultValue)
{
resultValue = S[i];
}
}
if (d_S ) cudaFree(d_S);
if (d_U ) cudaFree(d_U);
if (d_VT) cudaFree(d_VT);
if (devInfo) cudaFree(devInfo);
if (d_work) cudaFree(d_work);
if (cusolverH) cusolverDnDestroy(cusolverH);
if (stream) cudaStreamDestroy(stream);
return resultValue;
}
}