/*
 * container.h
 *
 *  Created on: Mar 21, 2011
 *      Author: ditlevsen
 *
 * Class mat_host for handling matrices/vectors in conjunction with C-BLAS
 */

#ifndef CONTAINER_HOST_H_
#define CONTAINER_HOST_H_
#pragma once

#include <cuda_runtime.h>   // cudaHostAllocDefault, page-locked allocation flags
#include "handle_error.h"
#include <stdexcept>
#include <string>

// enumerations...
enum T_alloc {allocated_in_constructor, preallocated_buffer};
enum T_mat_format {mat_col_major, mat_row_major};
enum T_sparse_mat_format {sparse_mat_csc, sparse_mat_csr};

// exceptions...
class illegal_mat_gpu_assignment: public std::runtime_error
{
public:
    illegal_mat_gpu_assignment(const std::string& message)
        : std::runtime_error(message) {}
};

//---------------------------------------
// class mat_host...
//---------------------------------------
class mat_host
{
private:
    float *pbuf;          // element buffer
    T_alloc alloc_info;   // whether the destructor must free pbuf

public:
    const int dim_x;
    const int dim_y;
    const int dim_z;
    const int len;        // total number of elements
    const T_mat_format format;
    const bool pagelocked;
    const unsigned int alloc_flags;

    // Allocate a l_y x l_x x l_z matrix (optionally zero-initialized,
    // optionally in page-locked memory).
    mat_host(int l_y, int l_x, int l_z, T_mat_format storage=mat_col_major,
             bool init=true, bool mem_pagelocked=false,
             unsigned int flags=cudaHostAllocDefault);
    // Allocate a flat vector of num_elements.
    mat_host(int num_elements, bool init=true, bool mem_pagelocked=false,
             unsigned int flags=cudaHostAllocDefault);
    // Wrap a caller-supplied buffer (not freed by the destructor; see
    // the preallocated_buffer allocation tag).
    mat_host(int l_y, int l_x, int l_z, float *buffer,
             T_mat_format storage=mat_col_major, bool mem_pagelocked=false,
             unsigned int flags=cudaHostAllocDefault);
    mat_host(int num_elements, float *buffer, bool mem_pagelocked=false,
             unsigned int flags=cudaHostAllocDefault);
    mat_host(const mat_host &m);
    ~mat_host();

    float *data() { return pbuf; }
    const float *data() const { return pbuf; }
    float &operator[](int i) { return pbuf[i]; }
    const float &operator[](int i) const { return pbuf[i]; }

    mat_host &operator=(const mat_host &m) throw(illegal_mat_gpu_assignment);

    void randomFill(float scl);
};

//---------------------------------------
// class sparse_mat_host...
//---------------------------------------
class sparse_mat_host
{
private:
    int *p_ptr;           // column (CSC) or row (CSR) pointers
    int *p_ind;           // row (CSC) or column (CSR) indices
    float *p_val;         // non-zero values
    T_alloc alloc_info;   // whether the destructor must free the buffers

public:
    const int dim_y;
    const int dim_x;
    const int nnz;        // number of non-zero entries
    const T_sparse_mat_format format;
    const bool pagelocked;
    const unsigned int alloc_flags;

    sparse_mat_host(int num_dim_y, int num_dim_x, int n_nonzero,
                    T_sparse_mat_format storage_format=sparse_mat_csc,
                    bool init=true, bool mem_pagelocked=false,
                    unsigned int flags=cudaHostAllocDefault);
    // Wrap caller-supplied buffers (not freed by the destructor).
    sparse_mat_host(int num_dim_y, int num_dim_x, int n_nonzero,
                    float *buff_val, int *buff_ptr, int *buff_ind,
                    T_sparse_mat_format storage_format=sparse_mat_csc,
                    bool mem_pagelocked=false,
                    unsigned int flags=cudaHostAllocDefault);
    sparse_mat_host(const sparse_mat_host &m);
    ~sparse_mat_host();

    float *val() { return p_val; }
    const float *val() const { return p_val; }
    int *ptr() { return p_ptr; }
    const int *ptr() const { return p_ptr; }
    int *ind() { return p_ind; }
    const int *ind() const { return p_ind; }

    sparse_mat_host &operator=(const sparse_mat_host &m)
        throw(illegal_mat_gpu_assignment);
};

//---------------------------------------
// struct geometry_host...
//---------------------------------------
struct geometry_host
{
    int *x_emitters;
    int *y_emitters;
    int *z_emitters;
    int *x_receivers;
    int *y_receivers;
    int *z_receivers;
    int num_emitters;
    int num_receivers;
    int rv_x;
    int rv_y;
    int rv_z;
    float scale_factor;
};

#endif /* CONTAINER_HOST_H_ */
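
//---------------------------------------
// Usage sketch...
//---------------------------------------
// A minimal, illustrative example of the containers declared above. It
// assumes the definitions in the accompanying source file; in particular,
// randomFill(scl) filling the buffer with values scaled by scl is an
// assumption based on the name, not something this header guarantees.
//
//   void example()
//   {
//       // 4 x 3 x 1 dense matrix, zero-initialized, allocated in
//       // page-locked (pinned) memory for fast host/device transfers.
//       mat_host A(4, 3, 1, mat_col_major, true, true);
//       A.randomFill(1.0f);
//       float first = A[0];              // element access via operator[]
//
//       // Wrap a preallocated buffer; per the preallocated_buffer
//       // allocation tag, the destructor should leave it alone.
//       float buf[12] = {0.0f};
//       mat_host B(4, 3, 1, buf);
//
//       // 100 x 100 sparse matrix in CSC format with 42 non-zeros.
//       sparse_mat_host S(100, 100, 42);
//       int *col_ptr = S.ptr();          // column pointers (CSC)
//   }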