template<class T, class F>
void init_random(viennacl::matrix<T, F> & M)
{
  std::vector<T> cM(M.internal_size());
  for (std::size_t i = 0; i < M.size1(); ++i)
    for (std::size_t j = 0; j < M.size2(); ++j)
      cM[F::mem_index(i, j, M.internal_size1(), M.internal_size2())] = T(rand()) / T(RAND_MAX);
  viennacl::fast_copy(&cM[0], &cM[0] + cM.size(), M);
}

template<class T>
void init_random(viennacl::vector<T> & x)
{
  std::vector<T> cx(x.internal_size());
  for (std::size_t i = 0; i < cx.size(); ++i)
    cx[i] = T(rand()) / T(RAND_MAX);
  viennacl::fast_copy(&cx[0], &cx[0] + cx.size(), x.begin());
}
template<class T>
void bench(size_t BLAS1_N, size_t BLAS2_M, size_t BLAS2_N,
           size_t BLAS3_M, size_t BLAS3_N, size_t BLAS3_K,
           std::string const & prefix)
{
  using viennacl::linalg::inner_prod;
  using viennacl::linalg::prod;

  viennacl::tools::timer timer;
  double time_previous, time_spent;
  size_t Nruns;
  double time_per_benchmark = 1; // aim for roughly one second of timed repetitions per operation

#define BENCHMARK_OP(OPERATION, NAME, PERF, INDEX) \
  OPERATION; \
  viennacl::backend::finish(); \
  timer.start(); \
  Nruns = 0; \
  time_spent = 0; \
  while (time_spent < time_per_benchmark) \
  { \
    time_previous = timer.get(); \
    OPERATION; \
    viennacl::backend::finish(); \
    time_spent += timer.get() - time_previous; \
    Nruns += 1; \
  } \
  time_spent /= (double)Nruns; \
  std::cout << prefix << NAME " : " << PERF << " " INDEX << std::endl;
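  // For readers who prefer not to expand the macro mentally: the first invocation
  // below, BENCHMARK_OP(x = y, "COPY", ..., "GB/s"), produces code of roughly this
  // shape. The untimed first run warms up the operation (and, on the OpenCL backend,
  // lets the kernels be compiled), and each timed run includes a finish() so that
  // kernel completion rather than mere submission is measured:
  //
  //   x = y;
  //   viennacl::backend::finish();
  //   timer.start(); Nruns = 0; time_spent = 0;
  //   while (time_spent < time_per_benchmark)
  //   {
  //     time_previous = timer.get();
  //     x = y;
  //     viennacl::backend::finish();
  //     time_spent += timer.get() - time_previous;
  //     Nruns += 1;
  //   }
  //   time_spent /= (double)Nruns;        // average seconds per run
  //   std::cout << prefix << "COPY : " << /* GB/s figure */ << " GB/s" << std::endl;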
  // BLAS 1: x and y are viennacl::vector<T> objects of size BLAS1_N, s is a
  // viennacl::scalar<T> and alpha a host scalar (their setup is not shown in this excerpt).
  BENCHMARK_OP(x = y,                "COPY", std::setprecision(3) << double(2*BLAS1_N*sizeof(T))/time_spent * 1e-9, "GB/s")
  BENCHMARK_OP(x = y + alpha*x,      "AXPY", std::setprecision(3) << double(3*BLAS1_N*sizeof(T))/time_spent * 1e-9, "GB/s")
  BENCHMARK_OP(s = inner_prod(x, y), "DOT",  std::setprecision(3) << double(2*BLAS1_N*sizeof(T))/time_spent * 1e-9, "GB/s")
  // BLAS 2: A is a BLAS2_M x BLAS2_N matrix, x and y are vectors of matching
  // size (their setup is not shown in this excerpt).
  BENCHMARK_OP(y = prod(A, x),        "GEMV-N", std::setprecision(3) << double((BLAS2_M + BLAS2_N + BLAS2_M*BLAS2_N)*sizeof(T))/time_spent * 1e-9, "GB/s")
  BENCHMARK_OP(x = prod(trans(A), y), "GEMV-T", std::setprecision(3) << double((BLAS2_M + BLAS2_N + BLAS2_M*BLAS2_N)*sizeof(T))/time_spent * 1e-9, "GB/s")
  // BLAS 3: A is BLAS3_M x BLAS3_K, B is BLAS3_K x BLAS3_N, C is BLAS3_M x BLAS3_N,
  // and AT, BT hold pre-transposed copies of A and B (their setup is not shown in this excerpt).
  BENCHMARK_OP(C = prod(A, B),         "GEMM-NN", double(2*BLAS3_M*BLAS3_N*BLAS3_K)/time_spent*1e-9, "GFLOPs/s");
  BENCHMARK_OP(C = prod(A, trans(BT)), "GEMM-NT", double(2*BLAS3_M*BLAS3_N*BLAS3_K)/time_spent*1e-9, "GFLOPs/s");
  BENCHMARK_OP(C = prod(trans(AT), B), "GEMM-TN", double(2*BLAS3_M*BLAS3_N*BLAS3_K)/time_spent*1e-9, "GFLOPs/s");
  // ... (remainder of bench() not shown in this excerpt)
}

int main()
{
#ifdef VIENNACL_WITH_OPENCL
  std::cout << std::endl;
  std::cout << "----------------------------------------------" << std::endl;
  std::cout << "               Device Info" << std::endl;
  std::cout << "----------------------------------------------" << std::endl;
  std::cout << std::endl;
  std::cout << viennacl::ocl::current_device().info() << std::endl;
  std::cout << std::endl;
#endif

  std::size_t BLAS1_N = 10000000;

  std::size_t BLAS2_M = 3840;
  std::size_t BLAS2_N = 3840;

  std::size_t BLAS3_M = 1976;
  std::size_t BLAS3_N = 1976;
  std::size_t BLAS3_K = 1976;

  std::cout << "Benchmark : BLAS" << std::endl;
  std::cout << "----------------" << std::endl;
  bench<float>(BLAS1_N, BLAS2_M, BLAS2_N, BLAS3_M, BLAS3_N, BLAS3_K, "s");
  std::cout << "----" << std::endl;
#ifdef VIENNACL_WITH_OPENCL
  if (viennacl::ocl::current_device().double_support())
    bench<double>(BLAS1_N, BLAS2_M, BLAS2_N, BLAS3_M, BLAS3_N, BLAS3_K, "d");
#endif
  return 0;
}