// Eval as rvalue
template<typename ArgType, typename Device>
struct TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
{
  typedef TensorLayoutSwapOp<ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  enum {
    IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
    CoordAccess = false,  // to be implemented
    RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
  };

  // Block evaluation is not implemented for layout swapping.
  typedef internal::TensorBlockNotImplemented TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device)
  {
    // Swapping the layout reverses the order of the dimensions.
    for (int i = 0; i < NumDims; ++i) {
      m_dimensions[i] = m_impl.dimensions()[NumDims-1-i];
    }
  }
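  // Example (illustrative sizes, not from the original source): a ColMajor input
  // of size 2x3x7 is reported by this evaluator as a RowMajor expression with
  // dimensions (7, 3, 2); the underlying linear storage order is unchanged.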
#ifdef EIGEN_USE_SYCL
  // Binds the nested evaluator's placeholder accessors to a SYCL command group handler.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
  }
#endif
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    return m_impl.evalSubExprsIfNeeded(data);
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  // Layout swapping reverses the dimensions but leaves the linear coefficient
  // order untouched, so indices are forwarded to the nested evaluator as-is.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(index);
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_impl.template packet<LoadMode>(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized);
  }
  EIGEN_DEVICE_FUNC typename Storage::Type data() const {
    return constCast(m_impl.data());
  }

  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
  Dimensions m_dimensions;
};

// Eval as lvalue
template<typename ArgType, typename Device>
struct TensorEvaluator<TensorLayoutSwapOp<ArgType>, Device>
    : public TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
{
  typedef TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device> Base;
  typedef TensorLayoutSwapOp<ArgType> XprType;
  enum {
    IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
    CoordAccess = false  // to be implemented
  };

  // Block evaluation is not implemented for layout swapping.
  typedef internal::TensorBlockNotImplemented TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(index);
  }

  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    this->m_impl.template writePacket<StoreMode>(index, x);
  }
};
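
// Usage sketch (illustrative only, not part of this header's implementation):
// swap_layout() wraps an expression in TensorLayoutSwapOp, which the evaluators
// above implement by reversing the reported dimensions and forwarding linear
// indices unchanged. Assuming a small ColMajor tensor named "input":
//
//   Eigen::Tensor<float, 2, Eigen::ColMajor> input(2, 4);
//   input.setRandom();
//   Eigen::Tensor<float, 2, Eigen::RowMajor> output = input.swap_layout();
//   eigen_assert(output.dimension(0) == 4);
//   eigen_assert(output.dimension(1) == 2);
//   // Coefficients keep their linear order: output.data()[i] == input.data()[i].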