Alien-XGBoost
view release on metacpan or search on metacpan
xgboost/cub/cub/thread/thread_load.cuh view on Meta::CPAN
//-----------------------------------------------------------------------------
/**
* \brief Enumeration of cache modifiers for memory load operations.
*/
/// Cache modifiers selectable for thread-level memory load operations
/// (used as the MODIFIER template argument of cub::ThreadLoad).
/// NOTE(review): enumerator order/values are baked into existing template
/// instantiations — do not reorder or renumber.
enum CacheLoadModifier
{
LOAD_DEFAULT, ///< Default (no modifier)
LOAD_CA, ///< Cache at all levels
LOAD_CG, ///< Cache at global level
LOAD_CS, ///< Cache streaming (likely to be accessed once)
LOAD_CV, ///< Cache as volatile (including cached system lines)
LOAD_LDG, ///< Cache as texture
LOAD_VOLATILE, ///< Volatile (any memory space)
};
/**
* \name Thread I/O (cache modified)
* @{
*/
xgboost/cub/cub/thread/thread_load.cuh view on Meta::CPAN
* int val = cub::ThreadLoad<cub::LOAD_CA>(d_in + threadIdx.x);
*
* // 16-bit load using default modifier
* short *d_in;
* short val = cub::ThreadLoad<cub::LOAD_DEFAULT>(d_in + threadIdx.x);
*
* // 256-bit load using cache-volatile modifier
* double4 *d_in;
* double4 val = cub::ThreadLoad<cub::LOAD_CV>(d_in + threadIdx.x);
*
* // 96-bit load using cache-streaming modifier
* struct TestFoo { bool a; short b; };
* TestFoo *d_struct;
* TestFoo val = cub::ThreadLoad<cub::LOAD_CS>(d_struct + threadIdx.x);
* \endcode
*
* \tparam MODIFIER <b>[inferred]</b> CacheLoadModifier enumeration
* \tparam InputIteratorT <b>[inferred]</b> Input iterator type \iterator
*/
template <
CacheLoadModifier MODIFIER,
xgboost/cub/cub/thread/thread_store.cuh view on Meta::CPAN
//-----------------------------------------------------------------------------
/**
* \brief Enumeration of cache modifiers for memory store operations.
*/
/// Cache modifiers selectable for thread-level memory store operations
/// (used as the MODIFIER template argument of cub::ThreadStore).
/// NOTE(review): enumerator order/values are baked into existing template
/// instantiations — do not reorder or renumber.
enum CacheStoreModifier
{
STORE_DEFAULT, ///< Default (no modifier)
STORE_WB, ///< Cache write-back all coherent levels
STORE_CG, ///< Cache at global level
STORE_CS, ///< Cache streaming (likely to be accessed once)
STORE_WT, ///< Cache write-through (to system memory)
STORE_VOLATILE, ///< Volatile shared (any memory space)
};
/**
* \name Thread I/O (cache modified)
* @{
*/
xgboost/cub/cub/thread/thread_store.cuh view on Meta::CPAN
* // 16-bit store using default modifier
* short *d_out;
* short val;
* cub::ThreadStore<cub::STORE_DEFAULT>(d_out + threadIdx.x, val);
*
* // 256-bit store using write-through modifier
* double4 *d_out;
* double4 val;
* cub::ThreadStore<cub::STORE_WT>(d_out + threadIdx.x, val);
*
* // 96-bit store using cache-streaming cache modifier
* struct TestFoo { bool a; short b; };
* TestFoo *d_struct;
* TestFoo val;
* cub::ThreadStore<cub::STORE_CS>(d_struct + threadIdx.x, val);
* \endcode
*
* \tparam MODIFIER <b>[inferred]</b> CacheStoreModifier enumeration
* \tparam OutputIteratorT <b>[inferred]</b> Output iterator type \iterator
* \tparam T <b>[inferred]</b> Data type of output value
*/
xgboost/cub/cub/util_type.cuh view on Meta::CPAN
/// Statically-sized array of type \p T (COUNT elements)
T array[COUNT];
/// Constructor — intentionally empty: elements are left uninitialized
/// (NOTE(review): presumably to keep the wrapper cheap to construct in
/// device code — confirm against the enclosing struct's usage)
__host__ __device__ __forceinline__ ArrayWrapper() {}
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Double-buffer storage wrapper for multi-pass stream transformations that require more than one storage array for streaming intermediate results back and forth.
*
* Many multi-pass computations require a pair of "ping-pong" storage
* buffers (e.g., one for reading from and the other for writing to, and then
* vice-versa for the subsequent pass). This structure wraps a set of device
* buffers and a "selector" member to track which is "current".
*/
template <typename T>
struct DoubleBuffer
{
/// Pair of device buffer pointers
xgboost/dmlc-core/include/dmlc/data.h view on Meta::CPAN
CHECK(index[i] < size) << "feature index exceed bound";
sum += weight[index[i]] * value[i];
}
}
return sum;
}
};
/*!
* \brief a block of data, containing several rows in sparse matrix
* This is useful for (streaming-style) algorithms that scan through rows of data
* examples include: SGD, GD, L-BFGS, kmeans
*
* The size of batch is usually large enough so that parallelizing over the rows
* can give significant speedup
* \tparam IndexType type to store the index used in row batch
*/
template<typename IndexType>
struct RowBlock {
/*! \brief batch size */
size_t size;
xgboost/doc/jvm/xgboost4j_full_integration.md view on Meta::CPAN
The integrations with Spark/Flink, a.k.a. <b>XGBoost4J-Spark</b> and <b>XGBoost-Flink</b>, have received tremendously positive feedback from the community. They enable users to build a unified pipeline, embedding XGBoost into the data processing system ...

In recent months, we have had a lot of communication with users and gained a deeper understanding of their latest usage scenarios and requirements:
* XGBoost keeps gaining more and more deployments in the production environment and the adoption in machine learning competitions [Link](http://datascience.la/xgboost-workshop-and-meetup-talk-with-tianqi-chen/).
* While Spark is still the mainstream data processing tool in most of scenarios, more and more users are porting their RDD-based Spark programs to [DataFrame/Dataset APIs](http://spark.apache.org/docs/latest/sql-programming-guide.html) for the well-d...
* Spark itself has presented a clear roadmap that DataFrame/Dataset would be the base of the latest and future features, e.g. latest version of [ML pipeline](http://spark.apache.org/docs/latest/ml-guide.html) and [Structured Streaming](http://spark.a...
Based on this feedback from users, we observed a gap between the original RDD-based XGBoost4J-Spark and users' latest usage scenarios as well as the future direction of the Spark ecosystem. To fill this gap, we started working on the <b><i>integrat...
## A Full Integration of XGBoost and DataFrame/Dataset
The following figure illustrates the new pipeline architecture with the latest XGBoost4J-Spark.

( run in 0.252 second using v1.01-cache-2.11-cpan-a5abf4f5562 )