#ifndef VIENNACL_DEVICE_SPECIFIC_TEMPLATES_REDUCTION_TEMPLATE_HPP
#define VIENNACL_DEVICE_SPECIFIC_TEMPLATES_REDUCTION_TEMPLATE_HPP

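/** @file viennacl/device_specific/templates/reduction_template.hpp
 *
 * Kernel template for scalar reduction operations (inner products, norms,
 * and index reductions such as argmax/argmin).
*/
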
#include <vector>

#include "viennacl/backend/opencl.hpp"

#include "viennacl/scheduler/forwards.h"
#include "viennacl/device_specific/tree_parsing.hpp"
#include "viennacl/device_specific/utils.hpp"

#include "viennacl/device_specific/templates/template_base.hpp"
#include "viennacl/device_specific/templates/utils.hpp"

#include "viennacl/tools/tools.hpp"

namespace viennacl
{
namespace device_specific
{

struct reduction_parameters : public template_base::parameters_type
{
  // local_size_1 is fixed to 1 and two kernels are generated (hence the
  // trailing "1, 2" passed to the base class).
  reduction_parameters(unsigned int _simd_width,
                       unsigned int _group_size, unsigned int _num_groups,
                       fetching_policy_type _fetching_policy)
    : template_base::parameters_type(_simd_width, _group_size, 1, 2),
      num_groups(_num_groups), fetching_policy(_fetching_policy) { }

  unsigned int num_groups;
  fetching_policy_type fetching_policy;
};
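
// A minimal construction sketch (values are illustrative, not tuned;
// FETCH_FROM_GLOBAL_STRIDED is one of the fetching policies declared in
// viennacl/device_specific/forwards.h):
//
//   reduction_parameters params(1 /*simd_width*/, 128 /*group_size*/,
//                               128 /*num_groups*/, FETCH_FROM_GLOBAL_STRIDED);
//   reduction_template rt(params);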

class reduction_template : public template_base_impl<reduction_template, reduction_parameters>
{

private:
  // One local-memory slot per work-item suffices for the tree reduction.
  unsigned int n_lmem_elements() const
  {
    return p_.local_size_0;
  }

  int check_invalid_impl(viennacl::ocl::device const &) const
  {
    // Operands are accumulated directly from global memory; staging them
    // through local memory is not supported by this template.
    if (p_.fetching_policy==FETCH_FROM_LOCAL)
      return TEMPLATE_INVALID_FETCHING_POLICY_TYPE;
    return TEMPLATE_VALID;
  }

  // Emits a tree reduction over the local-memory buffer(s): at every step the
  // lower half of the active work-items folds in its partner's element from
  // the upper half, halving the stride until one value remains in slot 0.
  inline void reduce_1d_local_memory(utils::kernel_generation_stream & stream, unsigned int size, std::vector<mapped_scalar_reduction*> exprs,
                                     std::string const & buf_str, std::string const & buf_value_str) const
  {
    stream << "#pragma unroll" << std::endl;
    stream << "for(unsigned int stride = " << size/2 << "; stride > 0; stride /= 2)" << std::endl;
    stream << "{" << std::endl;
    stream.inc_tab();
    stream << "barrier(CLK_LOCAL_MEM_FENCE);" << std::endl;
    stream << "if (lid < stride)" << std::endl;
    stream << "{" << std::endl;
    stream.inc_tab();

    for (unsigned int k = 0; k < exprs.size(); k++)
      if (exprs[k]->is_index_reduction())
        compute_index_reduction(stream, exprs[k]->process(buf_str+"[lid]"), exprs[k]->process(buf_str+"[lid+stride]"),
                                exprs[k]->process(buf_value_str+"[lid]"), exprs[k]->process(buf_value_str+"[lid+stride]"),
                                exprs[k]->root_op());
      else
        compute_reduction(stream, exprs[k]->process(buf_str+"[lid]"), exprs[k]->process(buf_str+"[lid+stride]"), exprs[k]->root_op());
    stream.dec_tab();
    stream << "}" << std::endl;
    stream.dec_tab();
    stream << "}" << std::endl;
  }
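
  /* For a plain sum reduction with local_size_0 == 128, the member function
     above emits OpenCL along these lines (sketch; the actual combine
     statement is produced by compute_reduction from the root operation):

       #pragma unroll
       for(unsigned int stride = 64; stride > 0; stride /= 2)
       {
         barrier(CLK_LOCAL_MEM_FENCE);
         if (lid < stride)
           buf[lid] = buf[lid] + buf[lid+stride];
       }
  */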

  std::string generate_impl(std::string const & kernel_prefix, statements_container const & statements, std::vector<mapping_type> const & mappings, unsigned int simd_width) const
  {
    utils::kernel_generation_stream stream;

    // Collect all scalar-reduction nodes from the mapped statements.
    std::vector<mapped_scalar_reduction*> exprs;
    for (std::vector<mapping_type>::const_iterator it = mappings.begin(); it != mappings.end(); ++it)
      for (mapping_type::const_iterator iit = it->begin(); iit != it->end(); ++iit)
        if (mapped_scalar_reduction * p = dynamic_cast<mapped_scalar_reduction*>(iit->second.get()))
          exprs.push_back(p);
    vcl_size_t N = exprs.size();

    // Each reduction receives a global temporary for its per-group partial
    // results; index reductions additionally receive a value buffer.
    std::string arguments = generate_value_kernel_argument("unsigned int", "N");
    for (unsigned int k = 0; k < N; ++k)
    {
      std::string numeric_type = utils::numeric_type_to_string(lhs_most(exprs[k]->statement().array(),
                                                                        exprs[k]->statement().root()).lhs.numeric_type);
      if (exprs[k]->is_index_reduction())
      {
        arguments += generate_pointer_kernel_argument("__global", "unsigned int", exprs[k]->process("#name_temp"));
        arguments += generate_pointer_kernel_argument("__global", numeric_type, exprs[k]->process("#name_temp_value"));
      }
      else
        arguments += generate_pointer_kernel_argument("__global", numeric_type, exprs[k]->process("#name_temp"));
    }

    /* ---------------------------------------------------------
     * First kernel: each work-group reduces a slice of the input
     * and writes one partial result per reduction.
     * --------------------------------------------------------- */
    stream << " __attribute__((reqd_work_group_size(" << p_.local_size_0 << ",1,1)))" << std::endl;
    generate_prototype(stream, kernel_prefix + "_0", arguments, mappings, statements);
    stream << "{" << std::endl;
    stream.inc_tab();

    tree_parsing::process(stream, PARENT_NODE_TYPE, "scalar", "#scalartype #namereg = *#pointer;", statements, mappings);
    stream << "unsigned int lid = get_local_id(0);" << std::endl;
    tree_parsing::process(stream, PARENT_NODE_TYPE, "vector", "#pointer += #start;", statements, mappings);

    // Declare a local buffer and a private accumulator (initialized with the
    // neutral element of the reduction) for every reduction expression.
    for (unsigned int k = 0; k < N; ++k)
    {
      if (exprs[k]->is_index_reduction())
      {
        stream << exprs[k]->process("__local #scalartype #name_buf_value[" + tools::to_string(p_.local_size_0) + "];") << std::endl;
        stream << exprs[k]->process("#scalartype #name_acc_value = " + neutral_element(exprs[k]->root_op()) + ";") << std::endl;
        stream << exprs[k]->process("__local unsigned int #name_buf[" + tools::to_string(p_.local_size_0) + "];") << std::endl;
        stream << exprs[k]->process("unsigned int #name_acc = 0;") << std::endl;
      }
      else
      {
        stream << exprs[k]->process("__local #scalartype #name_buf[" + tools::to_string(p_.local_size_0) + "];") << std::endl;
        stream << exprs[k]->process("#scalartype #name_acc = " + neutral_element(exprs[k]->root_op()) + ";") << std::endl;
      }
    }

    class loop_body : public loop_body_base
    {
    public:
      loop_body(std::vector<mapped_scalar_reduction*> const & exprs_) : exprs(exprs_){ }

      void operator()(utils::kernel_generation_stream & kernel_stream, unsigned int loop_simd_width) const
      {
        // Without SIMD, strides are applied explicitly; vload handles the
        // contiguous SIMD case.
        std::string i = (loop_simd_width==1)?"i*#stride":"i";
        std::string process_str;

        // Fetch the current element(s) of each operand into a register.
        {
          std::set<std::string> already_fetched;
          process_str = utils::append_width("#scalartype",loop_simd_width) + " #namereg = " + vload(loop_simd_width,i,"#pointer")+";";
          for (std::vector<mapped_scalar_reduction*>::const_iterator it = exprs.begin(); it != exprs.end(); ++it)
          {
            (*it)->process_recursive(kernel_stream, PARENT_NODE_TYPE, "vector", process_str, already_fetched);
            (*it)->process_recursive(kernel_stream, PARENT_NODE_TYPE, "matrix_row", "#scalartype #namereg = #pointer[$OFFSET{#row*#stride1, i*#stride2}];", already_fetched);
            (*it)->process_recursive(kernel_stream, PARENT_NODE_TYPE, "matrix_column", "#scalartype #namereg = #pointer[$OFFSET{i*#stride1,#column*#stride2}];", already_fetched);
            (*it)->process_recursive(kernel_stream, PARENT_NODE_TYPE, "matrix_diag", "#scalartype #namereg = #pointer[#diag_offset<0?$OFFSET{(i - #diag_offset)*#stride1, i*#stride2}:$OFFSET{i*#stride1, (i + #diag_offset)*#stride2}];", already_fetched);
          }
        }

        // With SIMD, each component of the register is accumulated separately.
        std::vector<std::string> str(loop_simd_width);
        if (loop_simd_width==1)
          str[0] = "#namereg";
        else
          for (unsigned int a = 0; a < loop_simd_width; ++a)
            str[a] = append_simd_suffix("#namereg.s", a);

        for (unsigned int k = 0; k < exprs.size(); ++k)
        {
          for (unsigned int a = 0; a < loop_simd_width; ++a)
          {
            std::map<std::string, std::string> accessors;
            accessors["vector"] = str[a];
            accessors["matrix_row"] = str[a];
            accessors["matrix_column"] = str[a];
            accessors["matrix_diag"] = str[a];
            accessors["scalar"] = "#namereg";
            std::string value = exprs[k]->evaluate_recursive(LHS_NODE_TYPE, accessors);
            // Inner products multiply the two operands before accumulating.
            if (exprs[k]->root_node().op.type==scheduler::OPERATION_BINARY_INNER_PROD_TYPE)
              value += "*" + exprs[k]->evaluate_recursive(RHS_NODE_TYPE, accessors);

            if (exprs[k]->is_index_reduction())
              compute_index_reduction(kernel_stream, exprs[k]->process("#name_acc"), "i*"+tools::to_string(loop_simd_width) + "+" + tools::to_string(a), exprs[k]->process("#name_acc_value"), value, exprs[k]->root_op());
            else
              compute_reduction(kernel_stream, exprs[k]->process("#name_acc"), value, exprs[k]->root_op());
          }
        }
      }

    private:
      std::vector<mapped_scalar_reduction*> exprs;
    };
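
    /* For loop_simd_width == 4 and a float vector operand, the fetch above
       expands along these lines (sketch):

         float4 reg = vload4(i, ptr);

       and the accumulation loop then folds reg.s0, reg.s1, reg.s2 and reg.s3
       into the private accumulator one component at a time. */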

    element_wise_loop_1D(stream, loop_body(exprs), p_.fetching_policy, simd_width, "i", "N", "get_global_id(0)", "get_global_size(0)");

    // Write each work-item's private accumulator to local memory.
    for (unsigned int k = 0; k < N; ++k)
    {
      if (exprs[k]->is_index_reduction())
        stream << exprs[k]->process("#name_buf_value[lid] = #name_acc_value;") << std::endl;
      stream << exprs[k]->process("#name_buf[lid] = #name_acc;") << std::endl;
    }

    reduce_1d_local_memory(stream, p_.local_size_0, exprs, "#name_buf", "#name_buf_value");

    // The first work-item publishes the group's partial result.
    stream << "if (lid==0)" << std::endl;
    stream << "{" << std::endl;
    stream.inc_tab();
    for (unsigned int k = 0; k < N; ++k)
    {
      if (exprs[k]->is_index_reduction())
        stream << exprs[k]->process("#name_temp_value[get_group_id(0)] = #name_buf_value[0];") << std::endl;
      stream << exprs[k]->process("#name_temp[get_group_id(0)] = #name_buf[0];") << std::endl;
    }
    stream.dec_tab();
    stream << "}" << std::endl;

    stream.dec_tab();
    stream << "}" << std::endl;
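
    /* For a single inner product, the first kernel generated above has
       roughly this shape (sketch; the exact prototype and fetch loop depend
       on the binding policy, the fetching policy, and the mapped objects):

         __kernel void prefix_0(unsigned int N, __global float* temp,
                                __global float* x, ..., __global float* y, ...)
         {
           unsigned int lid = get_local_id(0);
           __local float buf[GROUP_SIZE];
           float acc = 0;
           for (unsigned int i = get_global_id(0); i < N; i += get_global_size(0))
             acc = acc + x[i]*y[i];
           buf[lid] = acc;
           // ...tree reduction in local memory (see above)...
           if (lid==0)
             temp[get_group_id(0)] = buf[0];
         }
    */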

    /* ---------------------------------------------------------
     * Second kernel: a single work-group combines the num_groups
     * partial results into the final value(s).
     * --------------------------------------------------------- */
    stream << " __attribute__((reqd_work_group_size(" << p_.local_size_0 << ",1,1)))" << std::endl;
    generate_prototype(stream, kernel_prefix + "_1", arguments, mappings, statements);
    stream << "{" << std::endl;
    stream.inc_tab();

    stream << "unsigned int lid = get_local_id(0);" << std::endl;

    for (unsigned int k = 0; k < N; ++k)
    {
      if (exprs[k]->is_index_reduction())
      {
        stream << exprs[k]->process("__local unsigned int #name_buf[" + tools::to_string(p_.local_size_0) + "];") << std::endl;
        stream << exprs[k]->process("unsigned int #name_acc = 0;") << std::endl;
        stream << exprs[k]->process("__local #scalartype #name_buf_value[" + tools::to_string(p_.local_size_0) + "];") << std::endl;
        stream << exprs[k]->process("#scalartype #name_acc_value = " + neutral_element(exprs[k]->root_op()) + ";") << std::endl;
      }
      else
      {
        stream << exprs[k]->process("__local #scalartype #name_buf[" + tools::to_string(p_.local_size_0) + "];") << std::endl;
        stream << exprs[k]->process("#scalartype #name_acc = " + neutral_element(exprs[k]->root_op()) + ";") << std::endl;
      }
    }

    // Accumulate the per-group partial results written by the first kernel.
    stream << "for(unsigned int i = lid; i < " << p_.num_groups << "; i += get_local_size(0))" << std::endl;
    stream << "{" << std::endl;
    stream.inc_tab();
    for (unsigned int k = 0; k < N; ++k)
      if (exprs[k]->is_index_reduction())
        compute_index_reduction(stream, exprs[k]->process("#name_acc"), exprs[k]->process("#name_temp[i]"),
                                exprs[k]->process("#name_acc_value"), exprs[k]->process("#name_temp_value[i]"), exprs[k]->root_op());
      else
        compute_reduction(stream, exprs[k]->process("#name_acc"), exprs[k]->process("#name_temp[i]"), exprs[k]->root_op());

    stream.dec_tab();
    stream << "}" << std::endl;

    for (unsigned int k = 0; k < N; ++k)
    {
      if (exprs[k]->is_index_reduction())
        stream << exprs[k]->process("#name_buf_value[lid] = #name_acc_value;") << std::endl;
      stream << exprs[k]->process("#name_buf[lid] = #name_acc;") << std::endl;
    }

    reduce_1d_local_memory(stream, p_.local_size_0, exprs, "#name_buf", "#name_buf_value");

    // The first work-item evaluates the full statement (e.g. the assignment
    // of the reduced value to a scalar) using the reduced buffers.
    stream << "if (lid==0)" << std::endl;
    stream << "{" << std::endl;
    stream.inc_tab();
    std::map<std::string, std::string> accessors;
    accessors["scalar_reduction"] = "#name_buf[0]";
    accessors["scalar"] = "*#pointer";
    accessors["vector"] = "#pointer[#start]";
    tree_parsing::evaluate(stream, PARENT_NODE_TYPE, accessors, statements, mappings);
    stream.dec_tab();
    stream << "}" << std::endl;

    stream.dec_tab();
    stream << "}" << std::endl;

    return stream.str();
  }

  // Two variants are generated: a simd_width-1 fallback for strided access
  // and the full-SIMD version; enqueue() picks the appropriate one.
  std::vector<std::string> generate_impl(std::string const & kernel_prefix, statements_container const & statements, std::vector<mapping_type> const & mappings) const
  {
    std::vector<std::string> result;
    result.push_back(generate_impl(kernel_prefix + "_strided", statements, mappings, 1));
    result.push_back(generate_impl(kernel_prefix, statements, mappings, p_.simd_width));
    return result;
  }

public:
  reduction_template(reduction_template::parameters_type const & parameters, binding_policy_t binding_policy = BIND_ALL_UNIQUE)
    : template_base_impl<reduction_template, reduction_parameters>(parameters, binding_policy) { }

  void enqueue(std::string const & kernel_prefix, std::vector<lazy_program_compiler> & programs, statements_container const & statements)
  {
    // Collect all reduction nodes and determine the problem size from the
    // first one.
    std::vector<scheduler::statement_node const *> reductions;
    cl_uint size = 0;
    for (statements_container::data_type::const_iterator it = statements.data().begin(); it != statements.data().end(); ++it)
    {
      std::vector<vcl_size_t> reductions_idx;
      tree_parsing::traverse(*it, it->root(), tree_parsing::filter(&utils::is_reduction, reductions_idx), false);
      size = static_cast<cl_uint>(vector_size(lhs_most(it->array(), reductions_idx[0]), false));
      for (std::vector<vcl_size_t>::iterator itt = reductions_idx.begin(); itt != reductions_idx.end(); ++itt)
        reductions.push_back(&it->array()[*itt]);
    }

    scheduler::statement const & statement = statements.data().front();
    unsigned int scalartype_size = utils::size_of(lhs_most(statement.array(), statement.root()).lhs.numeric_type);

    // Fall back to the simd_width-1 variant when any operand is strided.
    viennacl::ocl::kernel * kernels[2];
    if (has_strided_access(statements) && p_.simd_width > 1)
    {
      kernels[0] = &programs[0].program().get_kernel(kernel_prefix+"_strided_0");
      kernels[1] = &programs[0].program().get_kernel(kernel_prefix+"_strided_1");
    }
    else
    {
      kernels[0] = &programs[1].program().get_kernel(kernel_prefix+"_0");
      kernels[1] = &programs[1].program().get_kernel(kernel_prefix+"_1");
    }

    // First kernel runs num_groups work-groups; the second runs a single one.
    kernels[0]->local_work_size(0, p_.local_size_0);
    kernels[0]->global_work_size(0, p_.local_size_0*p_.num_groups);

    kernels[1]->local_work_size(0, p_.local_size_0);
    kernels[1]->global_work_size(0, p_.local_size_0);

    for (unsigned int k = 0; k < 2; k++)
    {
      unsigned int n_arg = 0;
      kernels[k]->arg(n_arg++, size);
      unsigned int i = 0;
      unsigned int j = 0;
      for (std::vector<scheduler::statement_node const *>::const_iterator it = reductions.begin(); it != reductions.end(); ++it)
      {
        // Temporary buffers are created lazily and reused across calls;
        // index reductions need an extra buffer of unsigned int (4 bytes each).
        if (utils::is_index_reduction((*it)->op))
        {
          if (tmpidx_.size() <= j)
            tmpidx_.push_back(kernels[k]->context().create_memory(CL_MEM_READ_WRITE, p_.num_groups*4));
          kernels[k]->arg(n_arg++, tmpidx_[j]);
          j++;
        }

        if (tmp_.size() <= i)
          tmp_.push_back(kernels[k]->context().create_memory(CL_MEM_READ_WRITE, p_.num_groups*scalartype_size));
        kernels[k]->arg(n_arg++, tmp_[i]);
        i++;
      }
      set_arguments(statements, *kernels[k], n_arg);
    }

    for (unsigned int k = 0; k < 2; k++)
      viennacl::ocl::enqueue(*kernels[k]);
  }

private:
  std::vector< viennacl::ocl::handle<cl_mem> > tmp_;
  std::vector< viennacl::ocl::handle<cl_mem> > tmpidx_;
};
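
// End-to-end usage sketch (illustrative only; assumes the execute() helper
// from viennacl/device_specific/execute.hpp and a scheduler-built statement,
// so API details may differ across ViennaCL versions):
//
//   viennacl::vector<float> x(n), y(n);
//   viennacl::scalar<float> s = 0.0f;
//   scheduler::statement st(s, scheduler::OPERATION_BINARY_ASSIGN_TYPE,
//                           viennacl::linalg::inner_prod(x, y));
//   reduction_template rt(reduction_parameters(1, 128, 128, FETCH_FROM_GLOBAL_STRIDED));
//   execute(rt, statements_container(st));  // compiles (once) and enqueues both kernels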

}
}

#endif