/*
 * container_gpu.h
 *
 *  Created on: Jun 9, 2011
 *      Author: ditlevsen
 */

#ifndef CONTAINER_DEVICE_H_
#define CONTAINER_DEVICE_H_

#include "container_host.h"

class mat_device {
private:
    float *pbuf;
    int ld;

public:
    const int dim_x;
    const int dim_y;
    const int dim_z;
    const int len;
    const T_mat_format format;

    mat_device(int l_y, int l_x, int l_z, bool init=true, bool pitch=false, T_mat_format storage=mat_col_major);
    mat_device(int num_elements, bool init=true);
    mat_device(const mat_device &m);
    mat_device(const mat_host &m, bool pitch=false);
    ~mat_device() { if(pbuf) HANDLE_ERROR(cudaFree(pbuf)); }

    int leading_dim() const { return ld; }

    float *data_dev_ptr() { return pbuf; }
    const float *data_dev_ptr() const { return pbuf; }

    mat_device &operator=(const mat_device &m) throw(illegal_mat_gpu_assignment);
    mat_device &operator=(const mat_host &m) throw(illegal_mat_gpu_assignment);
};
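
/*
 * mat_device holds a dense float matrix in CUDA device memory (the buffer is
 * released with cudaFree in the destructor). Usage sketch, illustrative only:
 * "my_kernel" is a placeholder for any user kernel, and init=true is assumed
 * to mean the device buffer is cleared on construction.
 *
 *   mat_device A(64, 64, 1);             // 64x64x1 matrix allocated on the device
 *   mat_device B(A);                     // device-to-device copy construction
 *   float *d_buf = B.data_dev_ptr();     // raw device pointer for kernel launches
 *   // my_kernel<<<grid, block>>>(d_buf, B.leading_dim(), B.dim_y, B.dim_x);
 */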

class sparse_mat_device {
private:
    int *p_ind;
    int *p_ptr;
    float *p_val;

public:
    const int dim_y;
    const int dim_x;
    const int nnz;
    T_sparse_mat_format format;

    sparse_mat_device(int num_dim_y, int num_dim_x, int n_nonzero, T_sparse_mat_format storage_format, bool init=true);
    sparse_mat_device(const sparse_mat_device &m);
    sparse_mat_device(const sparse_mat_host &m);
    ~sparse_mat_device();

    float *val() { return p_val; }
    const float *val() const { return p_val; }

    int *ptr() { return p_ptr; }
    const int *ptr() const { return p_ptr; }

    int *ind() { return p_ind; }
    const int *ind() const { return p_ind; }

    sparse_mat_device &operator=(const sparse_mat_device &m) throw(illegal_mat_gpu_assignment);
    sparse_mat_device &operator=(const sparse_mat_host &m) throw(illegal_mat_gpu_assignment);
};
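
/*
 * sparse_mat_device keeps a sparse float matrix in device memory as three
 * arrays (values, indices, pointers). Usage sketch, illustrative only: the
 * concrete T_sparse_mat_format enumerators are defined in container_host.h,
 * so "sparse_csr" below is a placeholder name, and a CSR-style layout is
 * assumed for the comments on ptr()/ind().
 *
 *   sparse_mat_device S(n_rows, n_cols, n_nonzero, sparse_csr);
 *   // S.val() -> n_nonzero values, S.ind() -> n_nonzero column indices,
 *   // S.ptr() -> n_rows + 1 row offsets (interpretation depends on S.format).
 */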

//---------------------------------------
// class geometry_device
//---------------------------------------

class geometry_device {
private:
    int *x_emitters;
    int *y_emitters;
    int *z_emitters;
    int *x_receivers;
    int *y_receivers;
    int *z_receivers;

public:
    int *x_em_dev_ptr() { return x_emitters; }
    const int *x_em_dev_ptr() const { return x_emitters; }
    int *y_em_dev_ptr() { return y_emitters; }
    const int *y_em_dev_ptr() const { return y_emitters; }
    int *z_em_dev_ptr() { return z_emitters; }
    const int *z_em_dev_ptr() const { return z_emitters; }
    int *x_re_dev_ptr() { return x_receivers; }
    const int *x_re_dev_ptr() const { return x_receivers; }
    int *y_re_dev_ptr() { return y_receivers; }
    const int *y_re_dev_ptr() const { return y_receivers; }
    int *z_re_dev_ptr() { return z_receivers; }
    const int *z_re_dev_ptr() const { return z_receivers; }

    const int num_emitters;
    const int num_receivers;
    const int rv_x;
    const int rv_y;
    const int rv_z;
    const float scale_factor;

    geometry_device(const geometry_host &geom_host);
    geometry_device(const geometry_device &geom_dev);
    ~geometry_device();
};
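
/*
 * geometry_device mirrors a host-side geometry (emitter/receiver coordinates)
 * in device memory. Usage sketch, illustrative only: geometry_host is declared
 * in container_host.h and assumed to be filled on the CPU side, and
 * "emitter_kernel" is a placeholder kernel name.
 *
 *   geometry_device geom(geom_host);     // copies emitter/receiver coordinates to the device
 *   // emitter_kernel<<<grid, block>>>(geom.x_em_dev_ptr(), geom.y_em_dev_ptr(),
 *   //                                 geom.z_em_dev_ptr(), geom.num_emitters);
 */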

void mat_gpu_to_host(mat_host &m_host, const mat_device &m) throw(illegal_mat_gpu_assignment);
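
/*
 * Usage sketch for mat_gpu_to_host, illustrative only: the mat_host
 * constructor signature is assumed to mirror the device-side one declared in
 * container_host.h.
 *
 *   mat_device d_result(64, 64, 1);
 *   // ... kernels fill d_result ...
 *   mat_host h_result(64, 64, 1);        // assumed host-side counterpart constructor
 *   mat_gpu_to_host(h_result, d_result); // may throw illegal_mat_gpu_assignment,
 *                                        // presumably on a dimension mismatch
 */
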
#endif /* CONTAINER_DEVICE_H_ */