#include <cfloat>
#include <iostream>

#include <cuda_runtime.h>

#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>

#include "CudaMatrix.h" // assumed header name for Aurora::CudaMatrix and FunctionDirection

using namespace Aurora;

// One block per column: reduce each column of a column-major matrix
// (aColSize rows, gridDim.x columns) to its maximum.
__global__ void maxColKernel(float* aInputData, float* aOutput, unsigned int aColSize)
{
    // Index of this thread's element within its column
    unsigned int idx = blockIdx.x * aColSize + threadIdx.x;

    __shared__ float shared_data[256];

    // Each thread loads one element into shared memory (pad with -FLT_MAX)
    shared_data[threadIdx.x] = (threadIdx.x < aColSize) ? aInputData[idx] : -FLT_MAX;
    __syncthreads();

    // Each thread folds its strided segment of the column into its shared slot,
    // so the first blockDim.x slots cover the whole column
    for (unsigned int offset = blockDim.x; offset < aColSize; offset += blockDim.x)
    {
        if (threadIdx.x + offset < aColSize)
        {
            shared_data[threadIdx.x] = fmaxf(shared_data[threadIdx.x], aInputData[idx + offset]);
        }
        __syncthreads();
    }

    // Tree-reduce the leading blockDim.x slots
    for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if (threadIdx.x < offset)
        {
            shared_data[threadIdx.x] = fmaxf(shared_data[threadIdx.x], shared_data[threadIdx.x + offset]);
        }
        __syncthreads();
    }

    // Thread 0 writes this block's (column's) maximum to global memory
    if (threadIdx.x == 0)
    {
        aOutput[blockIdx.x] = shared_data[0];
    }
}

// One block per row: reduce each row of a column-major matrix to its maximum.
// aColSize is the column length (number of rows), aRowSize the row length
// (number of columns).
__global__ void maxRowKernel(float* aInputData, float* aOutput, unsigned int aColSize, unsigned int aRowSize)
{
    // Base index: element (blockIdx.x, threadIdx.x) in column-major storage
    unsigned int idx = threadIdx.x * aColSize + blockIdx.x;

    __shared__ float shared_data[512];

    // Each thread loads one element into shared memory (pad with -FLT_MAX)
    shared_data[threadIdx.x] = (threadIdx.x < aRowSize) ? aInputData[idx] : -FLT_MAX;
    __syncthreads();

    // Each thread folds its strided segment of the row into its shared slot,
    // so the first blockDim.x slots cover the whole row
    for (unsigned int offset = blockDim.x; offset < aRowSize; offset += blockDim.x)
    {
        if (threadIdx.x + offset < aRowSize)
        {
            shared_data[threadIdx.x] = fmaxf(shared_data[threadIdx.x], aInputData[idx + offset * aColSize]);
        }
        __syncthreads();
    }

    // Tree-reduce the leading blockDim.x slots
    for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if (threadIdx.x < offset)
        {
            shared_data[threadIdx.x] = fmaxf(shared_data[threadIdx.x], shared_data[threadIdx.x + offset]);
        }
        __syncthreads();
    }

    // Thread 0 writes this block's (row's) maximum to global memory
    if (threadIdx.x == 0)
    {
        aOutput[blockIdx.x] = shared_data[0];
    }
}
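// Directional maximum.
//
// Aurora::max(matrix, direction[, rowIdx, colIdx]) reduces a real 2-D matrix:
//   All    -> 1x1 matrix holding the global maximum; rowIdx/colIdx receive its
//             0-based position (column-major layout).
//   Row    -> one maximum per row, returned as a getDimSize(0) x 1 matrix.
//   Column -> one maximum per column, returned as a 1 x getDimSize(1) matrix.
// Vectors are always reduced as All; 3-D and complex inputs are rejected.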
CudaMatrix Aurora::max(const CudaMatrix &aMatrix, FunctionDirection direction)
{
    long a, b;
    return max(aMatrix, direction, a, b);
}

CudaMatrix Aurora::max(const CudaMatrix &aMatrix, FunctionDirection direction, long& rowIdx, long& colIdx)
{
    if (aMatrix.getDimSize(2) > 1 || aMatrix.isComplex())
    {
        std::cerr << (aMatrix.getDimSize(2) > 1 ? "max() not support 3D data!"
                                                : "max() not support complex value type!") << std::endl;
        return CudaMatrix();
    }
    // For a vector the row and column reductions coincide, so reduce over all elements
    if (aMatrix.isVector())
    {
        direction = All;
    }
    switch (direction)
    {
    case All:
    {
        thrust::device_ptr<float> d_ptr = thrust::device_pointer_cast(aMatrix.getData());
        auto max_iter = thrust::max_element(thrust::device, d_ptr, d_ptr + aMatrix.getDataSize());
        long index = max_iter - d_ptr;
        rowIdx = index % aMatrix.getDimSize(0);
        colIdx = index / aMatrix.getDimSize(0);
        float* data = nullptr;
        cudaMalloc((void**)&data, sizeof(float));
        auto ret = Aurora::CudaMatrix::fromRawData(data, 1, 1, 1);
        ret.setValue(0, *max_iter);
        return ret;
    }
    case Row:
    {
        float* matData = aMatrix.getData();
        float* retData = nullptr;
        int rowLength = aMatrix.getDimSize(1); // elements per row (= column count)
        cudaMalloc((void**)&retData, sizeof(float) * aMatrix.getDimSize(0));
        // One block per row; the block sizes below are assumed (powers of two,
        // at most 512 to match shared_data in maxRowKernel).
        if (rowLength < 512)
        {
            // Short rows: a single 512-thread pass covers the whole row
            maxRowKernel<<<aMatrix.getDimSize(0), 512>>>(matData, retData, aMatrix.getDimSize(0), rowLength);
        }
        else if (aMatrix.getDimSize(1) / aMatrix.getDimSize(0) > 4)
        {
            // Much wider than tall: keep the full 512-thread block per row
            maxRowKernel<<<aMatrix.getDimSize(0), 512>>>(matData, retData, aMatrix.getDimSize(0), rowLength);
        }
        else
        {
            // Otherwise favour smaller blocks so more rows run concurrently
            maxRowKernel<<<aMatrix.getDimSize(0), 128>>>(matData, retData, aMatrix.getDimSize(0), rowLength);
        }
        cudaDeviceSynchronize();
        CudaMatrix ret = Aurora::CudaMatrix::fromRawData(retData, aMatrix.getDimSize(0), 1);
        return ret;
    }
    case Column:
    default:
    {
        float* matData = aMatrix.getData();
        float* retData = nullptr;
        int colCount = aMatrix.getDimSize(0); // elements per column (= row count)
        cudaMalloc((void**)&retData, sizeof(float) * aMatrix.getDimSize(1));
        // One 256-thread block per column (block size assumed, matching shared_data)
        maxColKernel<<<aMatrix.getDimSize(1), 256>>>(matData, retData, colCount);
        cudaDeviceSynchronize();
        CudaMatrix ret = Aurora::CudaMatrix::fromRawData(retData, 1, aMatrix.getDimSize(1));
        return ret;
    }
    }
}

// Broadcast helper: element-wise maximum of a vector against every column (for an
// Mx1 vector) or every row (for a 1xN vector) of a column-major matrix. The body
// is a sketch in the thrust::transform style used elsewhere in this file.
CudaMatrix vxmMax(CudaMatrix aVec, CudaMatrix aMat)
{
    //col-vec x mat
    if (aVec.getDimSize(1) == 1 && aVec.getDimSize(0) == aMat.getDimSize(0))
    {
        std::cout << "max mat and col-vec " << std::endl;
        const unsigned int rows = aMat.getDimSize(0);
        float* vecData = aVec.getData();
        float* matData = aMat.getData();
        float* data = nullptr;
        cudaMalloc((void**)&data, sizeof(float) * aMat.getDataSize());
        auto lambda = [=] __host__ __device__ (const unsigned int& i) {
            return fmaxf(matData[i], vecData[i % rows]); // vector indexed by row
        };
        thrust::transform(thrust::device,
                          thrust::counting_iterator<unsigned int>(0),
                          thrust::counting_iterator<unsigned int>(aMat.getDataSize()),
                          data, lambda);
        return Aurora::CudaMatrix::fromRawData(data, aMat.getDimSize(0), aMat.getDimSize(1));
    }
    //row-vec x mat
    if (aVec.getDimSize(0) == 1 && aVec.getDimSize(1) == aMat.getDimSize(1))
    {
        std::cout << "max mat and row-vec " << std::endl;
        const unsigned int rows = aMat.getDimSize(0);
        float* vecData = aVec.getData();
        float* matData = aMat.getData();
        float* data = nullptr;
        cudaMalloc((void**)&data, sizeof(float) * aMat.getDataSize());
        auto lambda = [=] __host__ __device__ (const unsigned int& i) {
            return fmaxf(matData[i], vecData[i / rows]); // vector indexed by column
        };
        thrust::transform(thrust::device,
                          thrust::counting_iterator<unsigned int>(0),
                          thrust::counting_iterator<unsigned int>(aMat.getDataSize()),
                          data, lambda);
        return Aurora::CudaMatrix::fromRawData(data, aMat.getDimSize(0), aMat.getDimSize(1));
    }
    std::cerr << "max(A,B) with matrix must be like A[MxN] - B[1xN] or A[Mx1] - B[MxN]" << std::endl;
    return CudaMatrix();
}
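// Element-wise maximum.
//
// Aurora::max(A, B) accepts real 2-D operands and dispatches on shape:
// identical shapes are combined element by element, a 1x1 operand is treated
// as a scalar, and a row or column vector is broadcast against the other
// operand via vxmMax above. Aurora::max(A, value) clamps every element of A
// from below by `value`.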
CudaMatrix Aurora::max(const CudaMatrix &aMatrix, const CudaMatrix &aOther)
{
    if (aMatrix.getDimSize(2) > 1 || aMatrix.isComplex())
    {
        std::cerr << (aMatrix.getDimSize(2) > 1 ? "max() not support 3D data!"
                                                : "max() not support complex value type!") << std::endl;
        return CudaMatrix();
    }
    if (aOther.getDimSize(2) > 1 || aOther.isComplex())
    {
        std::cerr << (aOther.getDimSize(2) > 1 ? "max() not support 3D data!"
                                               : "max() not support complex value type!") << std::endl;
        return CudaMatrix();
    }
    // Same shape: element-wise maximum
    if (aMatrix.compareShape(aOther))
    {
        size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
        float* data = nullptr;
        cudaMalloc((void**)&data, sizeof(float) * size);
        auto lambda = [=] __host__ __device__ (const float& x, const float& y) { return fmaxf(x, y); };
        thrust::transform(thrust::device, aMatrix.getData(), aMatrix.getData() + aMatrix.getDataSize(),
                          aOther.getData(), data, lambda);
        return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1),
                                               aMatrix.getDimSize(2), aMatrix.getValueType());
    }
    // One operand is a scalar
    else if (aMatrix.getDataSize() == 1 || aOther.getDataSize() == 1)
    {
        float scalar = (aMatrix.getDataSize() == 1) ? aMatrix.getValue(0) : aOther.getValue(0);
        auto matrix = (aMatrix.getDataSize() == 1) ? aOther : aMatrix;
        return max(matrix, scalar);
    }
    // Vector vs. matrix broadcasting
    else if (aMatrix.isVector())
    {
        return ::vxmMax(aMatrix, aOther);
    }
    else if (aOther.isVector())
    {
        return ::vxmMax(aOther, aMatrix);
    }
    std::cerr << "max(A,B) with matrix must be like A[MxN] - B[1xN] or A[Mx1] - B[MxN]" << std::endl;
    return CudaMatrix();
}

CudaMatrix Aurora::max(const CudaMatrix &aMatrix, const float aValue)
{
    size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
    float* data = nullptr;
    cudaMalloc((void**)&data, sizeof(float) * size);
    // Clamp every element from below by aValue
    auto lambda = [=] __host__ __device__ (const float& x) { return fmaxf(x, aValue); };
    thrust::transform(thrust::device, aMatrix.getData(), aMatrix.getData() + aMatrix.getDataSize(),
                      data, lambda);
    return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1),
                                           aMatrix.getDimSize(2), aMatrix.getValueType());
}
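// Minimum reductions. minColKernel and minRowKernel mirror the max kernels
// above: they pad with FLT_MAX and reduce with fminf instead of fmaxf, and the
// Aurora::min overloads below follow the same dispatch as Aurora::max.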
__global__ void minColKernel(float* aInputData, float* aOutput, unsigned int aColSize)
{
    // Index of this thread's element within its column
    unsigned int idx = blockIdx.x * aColSize + threadIdx.x;

    __shared__ float shared_data[256];

    // Each thread loads one element into shared memory (pad with FLT_MAX)
    shared_data[threadIdx.x] = (threadIdx.x < aColSize) ? aInputData[idx] : FLT_MAX;
    __syncthreads();

    // Each thread folds its strided segment of the column into its shared slot
    for (unsigned int offset = blockDim.x; offset < aColSize; offset += blockDim.x)
    {
        if (threadIdx.x + offset < aColSize)
        {
            shared_data[threadIdx.x] = fminf(shared_data[threadIdx.x], aInputData[idx + offset]);
        }
        __syncthreads();
    }

    // Tree-reduce the leading blockDim.x slots
    for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if (threadIdx.x < offset)
        {
            shared_data[threadIdx.x] = fminf(shared_data[threadIdx.x], shared_data[threadIdx.x + offset]);
        }
        __syncthreads();
    }

    // Thread 0 writes this block's (column's) minimum to global memory
    if (threadIdx.x == 0)
    {
        aOutput[blockIdx.x] = shared_data[0];
    }
}

__global__ void minRowKernel(float* aInputData, float* aOutput, unsigned int aColSize, unsigned int aRowSize)
{
    // Base index: element (blockIdx.x, threadIdx.x) in column-major storage
    unsigned int idx = threadIdx.x * aColSize + blockIdx.x;

    __shared__ float shared_data[512];

    // Each thread loads one element into shared memory (pad with FLT_MAX)
    shared_data[threadIdx.x] = (threadIdx.x < aRowSize) ? aInputData[idx] : FLT_MAX;
    __syncthreads();

    // Each thread folds its strided segment of the row into its shared slot
    for (unsigned int offset = blockDim.x; offset < aRowSize; offset += blockDim.x)
    {
        if (threadIdx.x + offset < aRowSize)
        {
            shared_data[threadIdx.x] = fminf(shared_data[threadIdx.x], aInputData[idx + offset * aColSize]);
        }
        __syncthreads();
    }

    // Tree-reduce the leading blockDim.x slots
    for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if (threadIdx.x < offset)
        {
            shared_data[threadIdx.x] = fminf(shared_data[threadIdx.x], shared_data[threadIdx.x + offset]);
        }
        __syncthreads();
    }

    // Thread 0 writes this block's (row's) minimum to global memory
    if (threadIdx.x == 0)
    {
        aOutput[blockIdx.x] = shared_data[0];
    }
}

CudaMatrix Aurora::min(const CudaMatrix &aMatrix, FunctionDirection direction)
{
    long a, b;
    return min(aMatrix, direction, a, b);
}

CudaMatrix Aurora::min(const CudaMatrix &aMatrix, FunctionDirection direction, long& rowIdx, long& colIdx)
{
    if (aMatrix.getDimSize(2) > 1 || aMatrix.isComplex())
    {
        std::cerr << (aMatrix.getDimSize(2) > 1 ? "min() not support 3D data!"
                                                : "min() not support complex value type!") << std::endl;
        return CudaMatrix();
    }
    // For a vector the row and column reductions coincide, so reduce over all elements
    if (aMatrix.isVector())
    {
        direction = All;
    }
    switch (direction)
    {
    case All:
    {
        thrust::device_ptr<float> d_ptr = thrust::device_pointer_cast(aMatrix.getData());
        auto min_iter = thrust::min_element(thrust::device, d_ptr, d_ptr + aMatrix.getDataSize());
        long index = min_iter - d_ptr;
        rowIdx = index % aMatrix.getDimSize(0);
        colIdx = index / aMatrix.getDimSize(0);
        float* data = nullptr;
        cudaMalloc((void**)&data, sizeof(float));
        auto ret = Aurora::CudaMatrix::fromRawData(data, 1, 1, 1);
        ret.setValue(0, *min_iter);
        return ret;
    }
    case Row:
    {
        float* matData = aMatrix.getData();
        float* retData = nullptr;
        int rowLength = aMatrix.getDimSize(1); // elements per row (= column count)
        cudaMalloc((void**)&retData, sizeof(float) * aMatrix.getDimSize(0));
        // One block per row; the block sizes below are assumed (powers of two,
        // at most 512 to match shared_data in minRowKernel).
        if (rowLength < 512)
        {
            // Short rows: a single 512-thread pass covers the whole row
            minRowKernel<<<aMatrix.getDimSize(0), 512>>>(matData, retData, aMatrix.getDimSize(0), rowLength);
        }
        else if (aMatrix.getDimSize(1) / aMatrix.getDimSize(0) > 4)
        {
            // Much wider than tall: keep the full 512-thread block per row
            minRowKernel<<<aMatrix.getDimSize(0), 512>>>(matData, retData, aMatrix.getDimSize(0), rowLength);
        }
        else
        {
            // Otherwise favour smaller blocks so more rows run concurrently
            minRowKernel<<<aMatrix.getDimSize(0), 128>>>(matData, retData, aMatrix.getDimSize(0), rowLength);
        }
        cudaDeviceSynchronize();
        CudaMatrix ret = Aurora::CudaMatrix::fromRawData(retData, aMatrix.getDimSize(0), 1);
        return ret;
    }
    case Column:
    default:
    {
        float* matData = aMatrix.getData();
        float* retData = nullptr;
        int colCount = aMatrix.getDimSize(0); // elements per column (= row count)
        cudaMalloc((void**)&retData, sizeof(float) * aMatrix.getDimSize(1));
        // One 256-thread block per column (block size assumed, matching shared_data)
        minColKernel<<<aMatrix.getDimSize(1), 256>>>(matData, retData, colCount);
        cudaDeviceSynchronize();
        CudaMatrix ret = Aurora::CudaMatrix::fromRawData(retData, 1, aMatrix.getDimSize(1));
        return ret;
    }
    }
}

// Broadcast helper: element-wise minimum of a vector against every column (for an
// Mx1 vector) or every row (for a 1xN vector) of a column-major matrix. The body
// is a sketch in the thrust::transform style used elsewhere in this file.
CudaMatrix vxmMin(CudaMatrix aVec, CudaMatrix aMat)
{
    //col-vec x mat
    if (aVec.getDimSize(1) == 1 && aVec.getDimSize(0) == aMat.getDimSize(0))
    {
        std::cout << "min mat and col-vec " << std::endl;
        const unsigned int rows = aMat.getDimSize(0);
        float* vecData = aVec.getData();
        float* matData = aMat.getData();
        float* data = nullptr;
        cudaMalloc((void**)&data, sizeof(float) * aMat.getDataSize());
        auto lambda = [=] __host__ __device__ (const unsigned int& i) {
            return fminf(matData[i], vecData[i % rows]); // vector indexed by row
        };
        thrust::transform(thrust::device,
                          thrust::counting_iterator<unsigned int>(0),
                          thrust::counting_iterator<unsigned int>(aMat.getDataSize()),
                          data, lambda);
        return Aurora::CudaMatrix::fromRawData(data, aMat.getDimSize(0), aMat.getDimSize(1));
    }
    //row-vec x mat
    if (aVec.getDimSize(0) == 1 && aVec.getDimSize(1) == aMat.getDimSize(1))
    {
        std::cout << "min mat and row-vec " << std::endl;
        const unsigned int rows = aMat.getDimSize(0);
        float* vecData = aVec.getData();
        float* matData = aMat.getData();
        float* data = nullptr;
        cudaMalloc((void**)&data, sizeof(float) * aMat.getDataSize());
        auto lambda = [=] __host__ __device__ (const unsigned int& i) {
            return fminf(matData[i], vecData[i / rows]); // vector indexed by column
        };
        thrust::transform(thrust::device,
                          thrust::counting_iterator<unsigned int>(0),
                          thrust::counting_iterator<unsigned int>(aMat.getDataSize()),
                          data, lambda);
        return Aurora::CudaMatrix::fromRawData(data, aMat.getDimSize(0), aMat.getDimSize(1));
    }
    std::cerr << "min(A,B) with matrix must be like A[MxN] - B[1xN] or A[Mx1] - B[MxN]" << std::endl;
    return CudaMatrix();
}
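// Element-wise minimum.
//
// Aurora::min(A, B) mirrors Aurora::max(A, B): identical shapes are combined
// element by element, a 1x1 operand acts as a scalar, and vectors are broadcast
// via vxmMin above. Aurora::min(A, value) clamps every element of A from above
// by `value`.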
CudaMatrix Aurora::min(const CudaMatrix &aMatrix, const CudaMatrix &aOther)
{
    if (aMatrix.getDimSize(2) > 1 || aMatrix.isComplex())
    {
        std::cerr << (aMatrix.getDimSize(2) > 1 ? "min() not support 3D data!"
                                                : "min() not support complex value type!") << std::endl;
        return CudaMatrix();
    }
    if (aOther.getDimSize(2) > 1 || aOther.isComplex())
    {
        std::cerr << (aOther.getDimSize(2) > 1 ? "min() not support 3D data!"
                                               : "min() not support complex value type!") << std::endl;
        return CudaMatrix();
    }
    // Same shape: element-wise minimum
    if (aMatrix.compareShape(aOther))
    {
        size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
        float* data = nullptr;
        cudaMalloc((void**)&data, sizeof(float) * size);
        auto lambda = [=] __host__ __device__ (const float& x, const float& y) { return fminf(x, y); };
        thrust::transform(thrust::device, aMatrix.getData(), aMatrix.getData() + aMatrix.getDataSize(),
                          aOther.getData(), data, lambda);
        return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1),
                                               aMatrix.getDimSize(2), aMatrix.getValueType());
    }
    // One operand is a scalar
    else if (aMatrix.getDataSize() == 1 || aOther.getDataSize() == 1)
    {
        float scalar = (aMatrix.getDataSize() == 1) ? aMatrix.getValue(0) : aOther.getValue(0);
        auto matrix = (aMatrix.getDataSize() == 1) ? aOther : aMatrix;
        return min(matrix, scalar);
    }
    // Vector vs. matrix broadcasting
    else if (aMatrix.isVector())
    {
        return ::vxmMin(aMatrix, aOther);
    }
    else if (aOther.isVector())
    {
        return ::vxmMin(aOther, aMatrix);
    }
    std::cerr << "min(A,B) with matrix must be like A[MxN] - B[1xN] or A[Mx1] - B[MxN]" << std::endl;
    return CudaMatrix();
}

CudaMatrix Aurora::min(const CudaMatrix &aMatrix, const float aValue)
{
    size_t size = aMatrix.getDataSize() * aMatrix.getValueType();
    float* data = nullptr;
    cudaMalloc((void**)&data, sizeof(float) * size);
    // Clamp every element from above by aValue
    auto lambda = [=] __host__ __device__ (const float& x) { return fminf(x, aValue); };
    thrust::transform(thrust::device, aMatrix.getData(), aMatrix.getData() + aMatrix.getDataSize(),
                      data, lambda);
    return Aurora::CudaMatrix::fromRawData(data, aMatrix.getDimSize(0), aMatrix.getDimSize(1),
                                           aMatrix.getDimSize(2), aMatrix.getValueType());
}
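// Illustrative usage (not part of the library; shapes follow the column-major
// convention used above, and the FunctionDirection enumerators All/Row/Column
// are assumed to be visible through `using namespace Aurora`):
//
//   float host[6] = {1.f, 5.f, 3.f, 2.f, 4.f, 6.f};  // 3x2: columns [1 5 3], [2 4 6]
//   float* dev = nullptr;
//   cudaMalloc((void**)&dev, sizeof(float) * 6);
//   cudaMemcpy(dev, host, sizeof(float) * 6, cudaMemcpyHostToDevice);
//   CudaMatrix A = CudaMatrix::fromRawData(dev, 3, 2);
//
//   CudaMatrix colMax  = Aurora::max(A, Column);  // 1x2: [5 6]
//   CudaMatrix rowMin  = Aurora::min(A, Row);     // 3x1: [1 4 3]
//   CudaMatrix clamped = Aurora::max(A, 2.5f);    // element-wise max against a scalar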