#ifndef EIGEN_GENERAL_MATRIX_MATRIX_MKL_H
#define EIGEN_GENERAL_MATRIX_MATRIX_MKL_H
/** \internal
 * GEMM_SPECIALIZATION(EIGTYPE, EIGPREFIX, MKLTYPE, MKLPREFIX)
 *
 * Expands to a specialization of internal::general_matrix_matrix_product
 * that forwards the product `res += alpha * lhs * rhs` to the MKL/BLAS
 * ?gemm routine instead of Eigen's built-in kernel.
 *
 *  - EIGTYPE:   Eigen scalar type (double, float, dcomplex, scomplex)
 *  - EIGPREFIX: MatrixX typedef suffix (d, f, cd, cf)
 *  - MKLTYPE:   matching MKL scalar type
 *  - MKLPREFIX: BLAS routine prefix (d, s, z, c)
 *
 * BLAS gemm only accepts column-major operands; a row-major side is
 * mapped to the 'T'/'C' transpose flags, while a column-major side that
 * must be conjugated has no corresponding flag and is materialized into
 * a conjugated temporary (a_tmp/b_tmp).
 *
 * NOTE(review): interior continuation lines were lost in extraction and
 * have been restored from the upstream Eigen MKL binding — verify
 * against the original header.
 */
#define GEMM_SPECIALIZATION(EIGTYPE, EIGPREFIX, MKLTYPE, MKLPREFIX) \
template< \
  typename Index, \
  int LhsStorageOrder, bool ConjugateLhs, \
  int RhsStorageOrder, bool ConjugateRhs> \
struct general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor> \
{ \
static void run(Index rows, Index cols, Index depth, \
  const EIGTYPE* _lhs, Index lhsStride, \
  const EIGTYPE* _rhs, Index rhsStride, \
  EIGTYPE* res, Index resStride, \
  EIGTYPE alpha, \
  level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/, \
  GemmParallelInfo<Index>* /*info*/) \
{ \
  char transa, transb; \
  MKL_INT m, n, k, lda, ldb, ldc; \
  const EIGTYPE *a, *b; \
  MKLTYPE alpha_, beta_; \
  MatrixX##EIGPREFIX a_tmp, b_tmp; \
  EIGTYPE myone(1); \
\
  /* Row-major storage == transposed column-major; add conjugation ('C') if requested. */ \
  transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \
  transb = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \
\
  /* Problem dimensions, narrowed to the BLAS integer type. */ \
  m = (MKL_INT)rows; \
  n = (MKL_INT)cols; \
  k = (MKL_INT)depth; \
\
  /* res is accumulated into, i.e. gemm runs with beta == 1. */ \
  assign_scalar_eig2mkl(alpha_, alpha); \
  assign_scalar_eig2mkl(beta_, myone); \
\
  lda = (MKL_INT)lhsStride; \
  ldb = (MKL_INT)rhsStride; \
  ldc = (MKL_INT)resStride; \
\
  /* Column-major + conjugate has no gemm flag: materialize the conjugated copy. */ \
  if ((LhsStorageOrder==ColMajor) && (ConjugateLhs)) { \
    Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,m,k,OuterStride<>(lhsStride)); \
    a_tmp = lhs.conjugate(); \
    a = a_tmp.data(); \
    lda = a_tmp.outerStride(); \
  } else a = _lhs; \
\
  if ((RhsStorageOrder==ColMajor) && (ConjugateRhs)) { \
    Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,k,n,OuterStride<>(rhsStride)); \
    b_tmp = rhs.conjugate(); \
    b = b_tmp.data(); \
    ldb = b_tmp.outerStride(); \
  } else b = _rhs; \
\
  MKLPREFIX##gemm(&transa, &transb, &m, &n, &k, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
}};
109 GEMM_SPECIALIZATION(
double, d,
double, d)
110 GEMM_SPECIALIZATION(
float, f,
float, s)
111 GEMM_SPECIALIZATION(dcomplex, cd, MKL_Complex16, z)
112 GEMM_SPECIALIZATION(scomplex, cf, MKL_Complex8, c)
#endif // EIGEN_GENERAL_MATRIX_MATRIX_MKL_H