TensorDeviceCuda.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H


namespace Eigen {

// This defines an interface that GPUDevice can take to use
// CUDA streams underneath.
class StreamInterface {
 public:
  virtual ~StreamInterface() {}

  virtual const cudaStream_t& stream() const = 0;
  virtual const cudaDeviceProp& deviceProperties() const = 0;

  // Allocate memory on the actual device where the computation will run
  virtual void* allocate(size_t num_bytes) const = 0;
  virtual void deallocate(void* buffer) const = 0;
};
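
// Example (illustrative sketch, not part of the original file): a minimal
// StreamInterface implementation that wraps a caller-owned stream on a fixed
// device. The name `MyStream` and the minimal error handling are assumptions;
// a production implementation would check the status of every CUDA call, as
// CudaStreamDevice below does.
//
//   class MyStream : public Eigen::StreamInterface {
//    public:
//     MyStream(cudaStream_t stream, int device)
//         : stream_(stream), device_(device) {
//       cudaGetDeviceProperties(&props_, device_);
//     }
//     const cudaStream_t& stream() const { return stream_; }
//     const cudaDeviceProp& deviceProperties() const { return props_; }
//     // Allocate on the device the stream belongs to.
//     void* allocate(size_t num_bytes) const {
//       cudaSetDevice(device_);
//       void* result = NULL;
//       cudaMalloc(&result, num_bytes);
//       return result;
//     }
//     void deallocate(void* buffer) const {
//       cudaSetDevice(device_);
//       cudaFree(buffer);
//     }
//    private:
//     cudaStream_t stream_;
//     cudaDeviceProp props_;
//     int device_;
//   };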

static cudaDeviceProp* m_deviceProperties;
static bool m_devicePropInitialized = false;

// Caches the properties of every visible CUDA device on first use.
// Note that this initialization is not thread safe.
static void initializeDeviceProp() {
  if (!m_devicePropInitialized) {
    int num_devices;
    cudaError_t status = cudaGetDeviceCount(&num_devices);
    EIGEN_UNUSED_VARIABLE(status)
    assert(status == cudaSuccess);
    m_deviceProperties = new cudaDeviceProp[num_devices];
    for (int i = 0; i < num_devices; ++i) {
      status = cudaGetDeviceProperties(&m_deviceProperties[i], i);
      assert(status == cudaSuccess);
    }
    m_devicePropInitialized = true;
  }
}

static const cudaStream_t default_stream = cudaStreamDefault;

class CudaStreamDevice : public StreamInterface {
 public:
  // Use the default stream on the current device
  CudaStreamDevice() : stream_(&default_stream) {
    cudaGetDevice(&device_);
    initializeDeviceProp();
  }
  // Use the default stream on the specified device
  CudaStreamDevice(int device) : stream_(&default_stream), device_(device) {
    initializeDeviceProp();
  }
  // Use the specified stream. Note that it's the caller's responsibility
  // to ensure that the stream can run on the specified device. If no
  // device is specified, the code assumes that the stream is associated
  // with the current GPU device.
  CudaStreamDevice(const cudaStream_t* stream, int device = -1)
      : stream_(stream), device_(device) {
    if (device < 0) {
      cudaGetDevice(&device_);
    } else {
      int num_devices;
      cudaError_t err = cudaGetDeviceCount(&num_devices);
      EIGEN_UNUSED_VARIABLE(err)
      assert(err == cudaSuccess);
      assert(device < num_devices);
      device_ = device;
    }
    initializeDeviceProp();
  }

  const cudaStream_t& stream() const { return *stream_; }
  const cudaDeviceProp& deviceProperties() const {
    return m_deviceProperties[device_];
  }
  virtual void* allocate(size_t num_bytes) const {
    cudaError_t err = cudaSetDevice(device_);
    EIGEN_UNUSED_VARIABLE(err)
    assert(err == cudaSuccess);
    void* result;
    err = cudaMalloc(&result, num_bytes);
    assert(err == cudaSuccess);
    assert(result != NULL);
    return result;
  }
  virtual void deallocate(void* buffer) const {
    cudaError_t err = cudaSetDevice(device_);
    EIGEN_UNUSED_VARIABLE(err)
    assert(err == cudaSuccess);
    assert(buffer != NULL);
    err = cudaFree(buffer);
    assert(err == cudaSuccess);
  }

 private:
  const cudaStream_t* stream_;
  int device_;
};
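
// Example (illustrative): wrapping a user-created stream. `my_stream` is a
// hypothetical caller-owned stream; it must outlive the CudaStreamDevice,
// which only stores a pointer to it.
//
//   cudaStream_t my_stream;
//   cudaStreamCreate(&my_stream);
//   Eigen::CudaStreamDevice stream_device(&my_stream, /*device=*/0);
//   // ... use stream_device ...
//   cudaStreamDestroy(my_stream);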

struct GpuDevice {
  // The StreamInterface is not owned: the caller is
  // responsible for its initialization and eventual destruction.
  explicit GpuDevice(const StreamInterface* stream) : stream_(stream) {
    eigen_assert(stream);
  }

  // TODO(bsteiner): This is an internal API, we should not expose it.
  EIGEN_STRONG_INLINE const cudaStream_t& stream() const {
    return stream_->stream();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
#ifndef __CUDA_ARCH__
    return stream_->allocate(num_bytes);
#else
    eigen_assert(false && "The default device should be used instead to generate kernel code");
    return NULL;
#endif
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
#ifndef __CUDA_ARCH__
    stream_->deallocate(buffer);
#else
    eigen_assert(false && "The default device should be used instead to generate kernel code");
#endif
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
#ifndef __CUDA_ARCH__
    cudaError_t err = cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToDevice,
                                      stream_->stream());
    EIGEN_UNUSED_VARIABLE(err)
    assert(err == cudaSuccess);
#else
    eigen_assert(false && "The default device should be used instead to generate kernel code");
#endif
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
#ifndef __CUDA_ARCH__
    cudaError_t err =
        cudaMemcpyAsync(dst, src, n, cudaMemcpyHostToDevice, stream_->stream());
    EIGEN_UNUSED_VARIABLE(err)
    assert(err == cudaSuccess);
#else
    eigen_assert(false && "The default device should be used instead to generate kernel code");
#endif
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
#ifndef __CUDA_ARCH__
    cudaError_t err =
        cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToHost, stream_->stream());
    EIGEN_UNUSED_VARIABLE(err)
    assert(err == cudaSuccess);
#else
    eigen_assert(false && "The default device should be used instead to generate kernel code");
#endif
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
#ifndef __CUDA_ARCH__
    cudaError_t err = cudaMemsetAsync(buffer, c, n, stream_->stream());
    EIGEN_UNUSED_VARIABLE(err)
    assert(err == cudaSuccess);
#else
    eigen_assert(false && "The default device should be used instead to generate kernel code");
#endif
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const {
    // FIXME
    return 32;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
    // FIXME
    return 48*1024;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
    // We won't try to take advantage of the L2 cache for the time being,
    // and there is no L3 cache on CUDA devices.
    return firstLevelCacheSize();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const {
#if defined(__CUDACC__) && !defined(__CUDA_ARCH__)
    cudaError_t err = cudaStreamSynchronize(stream_->stream());
    EIGEN_UNUSED_VARIABLE(err)
    assert(err == cudaSuccess);
#else
    assert(false && "The default device should be used instead to generate kernel code");
#endif
  }

  inline int getNumCudaMultiProcessors() const {
    return stream_->deviceProperties().multiProcessorCount;
  }
  inline int maxCudaThreadsPerBlock() const {
    return stream_->deviceProperties().maxThreadsPerBlock;
  }
  inline int maxCudaThreadsPerMultiProcessor() const {
    return stream_->deviceProperties().maxThreadsPerMultiProcessor;
  }
  inline int sharedMemPerBlock() const {
    return stream_->deviceProperties().sharedMemPerBlock;
  }
  inline int majorDeviceVersion() const {
    return stream_->deviceProperties().major;
  }

  // This function checks if the CUDA runtime recorded an error for the
  // underlying stream device.
  inline bool ok() const {
#ifdef __CUDACC__
    cudaError_t error = cudaStreamQuery(stream_->stream());
    return (error == cudaSuccess) || (error == cudaErrorNotReady);
#else
    return false;
#endif
  }

 private:
  const StreamInterface* stream_;
};
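
// Example (illustrative sketch): evaluating a tensor expression on the GPU.
// The 40x50 shapes and the buffer names are assumptions made for the example.
//
//   Eigen::CudaStreamDevice stream;
//   Eigen::GpuDevice gpu_device(&stream);
//   const size_t bytes = 40 * 50 * sizeof(float);
//   float* gpu_a = static_cast<float*>(gpu_device.allocate(bytes));
//   float* gpu_b = static_cast<float*>(gpu_device.allocate(bytes));
//   float* gpu_c = static_cast<float*>(gpu_device.allocate(bytes));
//   // ... fill gpu_a and gpu_b, e.g. with gpu_device.memcpyHostToDevice ...
//   Eigen::TensorMap<Eigen::Tensor<float, 2> > a(gpu_a, 40, 50);
//   Eigen::TensorMap<Eigen::Tensor<float, 2> > b(gpu_b, 40, 50);
//   Eigen::TensorMap<Eigen::Tensor<float, 2> > c(gpu_c, 40, 50);
//   c.device(gpu_device) = a + b;  // queued on the underlying stream
//   gpu_device.synchronize();      // block until the expression has run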


#define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...)            \
  (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__);  \
  assert(cudaGetLastError() == cudaSuccess);
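
// Example (illustrative): launching a hypothetical kernel through the macro.
// `my_kernel`, `data` and `n` are assumptions, and the launch configuration
// below is only one plausible way to size the grid from device properties.
//
//   __global__ void my_kernel(float* data, int n);
//   ...
//   const int block_size = device.maxCudaThreadsPerBlock();
//   const int grid_size = device.getNumCudaMultiProcessors() *
//                         device.maxCudaThreadsPerMultiProcessor() / block_size;
//   LAUNCH_CUDA_KERNEL(my_kernel, grid_size, block_size, 0, device, data, n);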


// FIXME: Should be device and kernel specific.
#ifdef __CUDACC__
static inline void setCudaSharedMemConfig(cudaSharedMemConfig config) {
  cudaError_t status = cudaDeviceSetSharedMemConfig(config);
  EIGEN_UNUSED_VARIABLE(status)
  assert(status == cudaSuccess);
}
#endif
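
// Example (illustrative): selecting eight-byte shared memory banks before
// running kernels that mostly access double-precision data.
// cudaSharedMemBankSizeEightByte is a standard CUDA runtime enumerator.
//
//   Eigen::setCudaSharedMemConfig(cudaSharedMemBankSizeEightByte);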

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H