40 #include <viennacl/vector.hpp>
// Default constructor. NOTE(review): this extract is fragmentary — the
// body (presumably delegating to init()) is not visible here; confirm
// against the full source.
52 CGPUVector<T>::CGPUVector()
// Constructor: allocate an uninitialized GPU buffer for `length` elements
// of T via ViennaCL's backend.
// NOTE(review): fragmentary extract — the trailing arguments of
// memory_create (e.g. the viennacl::context, as seen in the sibling
// constructors) and the closing of the call are not visible here.
58 CGPUVector<T>::CGPUVector(
index_t length) : vector(new VCLMemoryArray())
64 viennacl::backend::memory_create(*vector,
sizeof(T)*vlen,
// Constructor: wrap an existing, shared GPU memory array (no allocation),
// viewing `length` elements. NOTE(review): fragmentary extract — remaining
// parameters (likely an offset) and the body are not visible here.
69 CGPUVector<T>::CGPUVector(std::shared_ptr<VCLMemoryArray> mem,
index_t length,
// Constructor: allocate a GPU buffer and copy the contents of a host-side
// SGVector into it (memory_create followed by memory_write of
// vlen*sizeof(T) bytes).
// NOTE(review): fragmentary extract — the trailing arguments of
// memory_create/memory_write (context, host source pointer) are not
// visible here; confirm against the full source.
80 CGPUVector<T>::CGPUVector(
const SGVector<T>& cpu_vec) : vector(new VCLMemoryArray())
85 viennacl::backend::memory_create(*vector,
sizeof(T)*vlen,
88 viennacl::backend::memory_write(*vector, 0, vlen*
sizeof(T),
// Constructor: allocate a GPU buffer sized from an Eigen column vector
// (vlen = cpu_vec.size()) in the default ViennaCL context, then upload
// the host data with memory_write.
// NOTE(review): fragmentary extract — the host source pointer argument of
// memory_write (presumably cpu_vec.data()) is not visible here.
94 CGPUVector<T>::CGPUVector(
const EigenVectorXt& cpu_vec)
95 : vector(new VCLMemoryArray())
98 vlen = cpu_vec.size();
100 viennacl::backend::memory_create(*vector,
sizeof(T)*vlen,
101 viennacl::context());
103 viennacl::backend::memory_write(*vector, 0, vlen*
sizeof(T),
// Constructor: same as the EigenVectorXt overload but for an Eigen row
// vector — allocate vlen = cpu_vec.size() elements on the GPU and upload
// the host data.
// NOTE(review): fragmentary extract — the host source pointer argument of
// memory_write is not visible here.
108 CGPUVector<T>::CGPUVector(
const EigenRowVectorXt& cpu_vec)
109 : vector(new VCLMemoryArray())
112 vlen = cpu_vec.size();
114 viennacl::backend::memory_create(*vector,
sizeof(T)*vlen,
115 viennacl::context());
117 viennacl::backend::memory_write(*vector, 0, vlen*
sizeof(T),
// Conversion to a host-side Eigen column vector: allocate cpu_vec of
// length vlen and download vlen*sizeof(T) bytes starting at byte offset
// offset*sizeof(T) from the GPU buffer.
// NOTE(review): fragmentary extract — the destination pointer argument of
// memory_read and the return statement are not visible here.
122 CGPUVector<T>::operator EigenVectorXt()
const
124 EigenVectorXt cpu_vec(vlen);
126 viennacl::backend::memory_read(*vector, offset*
sizeof(T), vlen*
sizeof(T),
// Conversion to a host-side Eigen row vector: mirrors the EigenVectorXt
// conversion — read vlen elements from the GPU buffer starting at the
// element offset.
// NOTE(review): fragmentary extract — the destination pointer argument of
// memory_read and the return statement are not visible here.
133 CGPUVector<T>::operator EigenRowVectorXt()
const
135 EigenRowVectorXt cpu_vec(vlen);
137 viennacl::backend::memory_read(*vector, offset*
sizeof(T), vlen*
sizeof(T),
// Conversion to a host-side SGVector: allocate cpu_vec of length vlen and
// download the GPU data starting at the element offset.
// NOTE(review): fragmentary extract — the destination pointer argument of
// memory_read and the return statement are not visible here.
145 CGPUVector<T>::operator SGVector<T>()
const
147 SGVector<T> cpu_vec(vlen);
149 viennacl::backend::memory_read(*vector, offset*
sizeof(T), vlen*
sizeof(T),
156 typename CGPUVector<T>::VCLVectorBase CGPUVector<T>::vcl_vector()
158 return VCLVectorBase(*vector,vlen, offset, 1);
162 void CGPUVector<T>::display_vector(
const char* name)
const
164 ((SGVector<T>)*
this).display_vector(name);
168 void CGPUVector<T>::zero()
170 vcl_vector().clear();
174 void CGPUVector<T>::set_const(T value)
176 VCLVectorBase v = vcl_vector();
177 viennacl::linalg::vector_assign(v, value);
181 viennacl::const_entry_proxy< T > CGPUVector<T>::operator[](
index_t index)
const
183 return viennacl::const_entry_proxy<T>(offset+index, *vector);
187 viennacl::entry_proxy< T > CGPUVector<T>::operator[](
index_t index)
189 return viennacl::entry_proxy<T>(offset+index, *vector);
// Shared initializer used by the constructors (presumably resets
// vlen/offset to defaults). NOTE(review): fragmentary extract — the body
// is not visible here; confirm against the full source.
193 void CGPUVector<T>::init()
// Explicit template instantiations for every element type CGPUVector
// supports, so the definitions in this translation unit are emitted once
// and linked against by users of the header.
199 template class CGPUVector<char>;
200 template class CGPUVector<uint8_t>;
201 template class CGPUVector<int16_t>;
202 template class CGPUVector<uint16_t>;
203 template class CGPUVector<int32_t>;
204 template class CGPUVector<uint32_t>;
205 template class CGPUVector<int64_t>;
206 template class CGPUVector<uint64_t>;
207 template class CGPUVector<float32_t>;
208 template class CGPUVector<float64_t>;
212 #endif // HAVE_VIENNACL
All classes and functions above are contained in the shogun namespace.