view release on metacpan or search on metacpan
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
xgboost/R-package/R/xgb.create.features.R view on Meta::CPAN
#' @details
#' This is the function inspired from the paragraph 3.1 of the paper:
#'
#' \strong{Practical Lessons from Predicting Clicks on Ads at Facebook}
#'
#' \emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yanxin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
#' Joaquin Quinonero Candela)}
#'
#' International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014
#'
#' \url{https://research.fb.com/publications/practical-lessons-from-predicting-clicks-on-ads-at-facebook/}.
#'
#' Extract explaining the method:
#'
#' "We found that boosted decision trees are a powerful and very
#' convenient way to implement non-linear and tuple transformations
#' of the kind we just described. We treat each individual
#' tree as a categorical feature that takes as value the
#' index of the leaf an instance ends up falling in. We use
#' 1-of-K coding of this type of features.
#'
xgboost/R-package/man/xgb.create.features.Rd view on Meta::CPAN
\details{
This is the function inspired from the paragraph 3.1 of the paper:
\strong{Practical Lessons from Predicting Clicks on Ads at Facebook}
\emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yanxin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
Joaquin Quinonero Candela)}
International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014
\url{https://research.fb.com/publications/practical-lessons-from-predicting-clicks-on-ads-at-facebook/}.
Extract explaining the method:
"We found that boosted decision trees are a powerful and very
convenient way to implement non-linear and tuple transformations
of the kind we just described. We treat each individual
tree as a categorical feature that takes as value the
index of the leaf an instance ends up falling in. We use
1-of-K coding of this type of features.
xgboost/cub/README.md view on Meta::CPAN
could be aliased to global memory allocations).
<br><hr>
<h3>Stable Releases</h3>
CUB releases are labeled using version identifiers having three fields:
*epoch.feature.update*. The *epoch* field corresponds to support for
a major change in the CUDA programming model. The *feature* field
corresponds to a stable set of features, functionality, and interface. The
*update* field corresponds to a bug-fix or performance update for that
feature set. At the moment, we do not publicly provide non-stable releases
such as development snapshots, beta releases or rolling releases. (Feel free
to contact us if you would like such things.) See the
[CUB Project Website](http://nvlabs.github.com/cub) for more information.
<br><hr>
<h3>Contributors</h3>
CUB is developed as an open-source project by [NVIDIA Research](http://research.nvidia.com). The primary contributor is [Duane Merrill](http://github.com/dumerrill).
<br><hr>
xgboost/cub/cub/block/block_adjacent_difference.cuh view on Meta::CPAN
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
public:
/// \smemstorage{BlockDiscontinuity}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
xgboost/cub/cub/block/block_discontinuity.cuh view on Meta::CPAN
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
public:
/// \smemstorage{BlockDiscontinuity}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
xgboost/cub/cub/block/block_exchange.cuh view on Meta::CPAN
/******************************************************************************
* Type definitions
******************************************************************************/
/// Shared memory storage layout type
struct __align__(16) _TempStorage
{
InputT buff[TIME_SLICED_ITEMS + PADDING_ITEMS];
};
public:
/// \smemstorage{BlockExchange}
struct TempStorage : Uninitialized<_TempStorage> {};
private:
/******************************************************************************
* Thread fields
******************************************************************************/
xgboost/cub/cub/block/block_exchange.cuh view on Meta::CPAN
// Copy
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
output_items[ITEM] = temp_items[ITEM];
}
}
public:
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockExchange()
xgboost/cub/cub/block/block_exchange.cuh view on Meta::CPAN
/******************************************************************************
* Type definitions
******************************************************************************/
/// Shared memory storage layout type
struct _TempStorage
{
T buff[WARP_ITEMS + PADDING_ITEMS];
};
public:
/// \smemstorage{WarpExchange}
struct TempStorage : Uninitialized<_TempStorage> {};
private:
/******************************************************************************
* Thread fields
******************************************************************************/
_TempStorage &temp_storage;
int lane_id;
public:
/******************************************************************************
* Construction
******************************************************************************/
/// Constructor
__device__ __forceinline__ WarpExchange(
TempStorage &temp_storage)
:
temp_storage(temp_storage.Alias()),
xgboost/cub/cub/block/block_histogram.cuh view on Meta::CPAN
******************************************************************************/
/// Internal storage allocator
/// Returns a reference to a privately declared __shared__ _TempStorage,
/// used when the caller did not supply its own temporary storage.  The
/// function-scope __shared__ declaration yields one instance per thread block.
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
public:
/// \smemstorage{BlockHistogram}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
xgboost/cub/cub/block/block_load.cuh view on Meta::CPAN
/******************************************************************************
* Thread fields
******************************************************************************/
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
public:
/// \smemstorage{BlockLoad}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
xgboost/cub/cub/block/block_radix_rank.cuh view on Meta::CPAN
// Compute exclusive sum
PackedCounter exclusive_partial;
PrefixCallBack prefix_call_back;
BlockScan(temp_storage.block_scan).ExclusiveSum(raking_partial, exclusive_partial, prefix_call_back);
// Downsweep scan with exclusive partial
ExclusiveDownsweep(exclusive_partial);
}
public:
/// \smemstorage{BlockScan}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
xgboost/cub/cub/block/block_radix_sort.cuh view on Meta::CPAN
}
// Untwiddle bits if necessary
#pragma unroll
for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
{
unsigned_keys[KEY] = KeyTraits::TwiddleOut(unsigned_keys[KEY]);
}
}
public:
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/// Sort blocked -> striped arrangement
template <int DESCENDING, int KEYS_ONLY>
__device__ __forceinline__ void SortBlockedToStriped(
KeyT (&keys)[ITEMS_PER_THREAD], ///< Keys to sort
ValueT (&values)[ITEMS_PER_THREAD], ///< Values to sort
int begin_bit, ///< The beginning (least-significant) bit index needed for key comparison
int end_bit, ///< The past-the-end (most-significant) bit index needed for key comparison
xgboost/cub/cub/block/block_reduce.cuh view on Meta::CPAN
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
public:
/// \smemstorage{BlockReduce}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
xgboost/cub/cub/block/block_scan.cuh view on Meta::CPAN
/// Internal storage allocator: returns a reference to a privately declared
/// __shared__ _TempStorage (one instance per thread block), used when the
/// caller did not supply its own temporary storage.
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/******************************************************************************
* Public types
******************************************************************************/
public:
/// \smemstorage{BlockScan}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
xgboost/cub/cub/block/block_shuffle.cuh view on Meta::CPAN
******************************************************************************/
/// Shared memory storage layout type (last element from each thread's input)
struct _TempStorage
{
T prev[BLOCK_THREADS];
T next[BLOCK_THREADS];
};
public:
/// \smemstorage{BlockShuffle}
struct TempStorage : Uninitialized<_TempStorage> {};
private:
/******************************************************************************
* Thread fields
******************************************************************************/
xgboost/cub/cub/block/block_shuffle.cuh view on Meta::CPAN
******************************************************************************/
/// Internal storage allocator
/// Internal storage allocator: returns a reference to a privately declared
/// __shared__ _TempStorage (one instance per thread block), used when the
/// caller did not supply its own temporary storage.
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
public:
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockShuffle()
xgboost/cub/cub/block/block_store.cuh view on Meta::CPAN
/******************************************************************************
* Thread fields
******************************************************************************/
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
public:
/// \smemstorage{BlockStore}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
xgboost/cub/cub/device/device_scan.cuh view on Meta::CPAN
* \par
* As of CUB 1.0.1 (2013), CUB's device-wide scan APIs have implemented our <em>"decoupled look-back"</em> algorithm
* for performing global prefix scan with only a single pass through the
* input data, as described in our 2016 technical report [1]. The central
* idea is to leverage a small, constant factor of redundant work in order to overlap the latencies
* of global prefix propagation with local computation. As such, our algorithm requires only
* ~2<em>n</em> data movement (<em>n</em> inputs are read, <em>n</em> outputs are written), and typically
* proceeds at "memcpy" speeds.
*
* \par
* [1] [Duane Merrill and Michael Garland. "Single-pass Parallel Prefix Scan with Decoupled Look-back", <em>NVIDIA Technical Report NVR-2016-002</em>, 2016.](https://research.nvidia.com/publication/single-pass-parallel-prefix-scan-decoupled-look-back)
*
* \par Usage Considerations
* \cdp_class{DeviceScan}
*
* \par Performance
* \linear_performance{prefix scan}
*
* \par
* The following chart illustrates DeviceScan::ExclusiveSum
* performance across different CUDA architectures for \p int32 keys.
xgboost/cub/cub/grid/grid_barrier.cuh view on Meta::CPAN
*/
class GridBarrier
{
protected :
typedef unsigned int SyncFlag;
// Counters in global device memory
SyncFlag* d_sync;
public:
/**
 * Constructor: creates a barrier with no device synchronization
 * storage attached (d_sync == NULL)
 */
GridBarrier() : d_sync(NULL) {}
/**
* Synchronize
*/
xgboost/cub/cub/grid/grid_barrier.cuh view on Meta::CPAN
}
};
/**
* \brief GridBarrierLifetime extends GridBarrier to provide lifetime management of the temporary device storage needed for cooperation.
*
* Uses RAII for lifetime, i.e., device resources are reclaimed when
* the destructor is called.
*/
class GridBarrierLifetime : public GridBarrier
{
protected:
// Number of bytes backed by d_sync
size_t sync_bytes;
public:
/**
 * Constructor: starts with no device storage allocated
 * (base d_sync is NULL; sync_bytes is zero)
 */
GridBarrierLifetime() : GridBarrier(), sync_bytes(0) {}
/**
* DeviceFrees and resets the progress counters
*/
xgboost/cub/cub/grid/grid_queue.cuh view on Meta::CPAN
/// Counter indices
enum
{
FILL = 0,
DRAIN = 1,
};
/// Pair of counters
OffsetT *d_counters;
public:
/// Returns the device allocation size in bytes needed to construct a GridQueue instance
/// Size is sizeof(OffsetT) * 2: storage for the pair of queue counters
/// (FILL and DRAIN) that back a GridQueue instance.
__host__ __device__ __forceinline__
static size_t AllocationSize()
{
return sizeof(OffsetT) * 2;
}
/// Constructs an invalid GridQueue descriptor
xgboost/cub/cub/iterator/arg_index_input_iterator.cuh view on Meta::CPAN
* \tparam InputIteratorT The value type of the wrapped input iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
* \tparam OutputValueT The paired value type of the <offset,value> tuple (Default: value type of input iterator)
*/
template <
typename InputIteratorT,
typename OffsetT = ptrdiff_t,
typename OutputValueT = typename std::iterator_traits<InputIteratorT>::value_type>
class ArgIndexInputIterator
{
public:
// Required iterator traits
typedef ArgIndexInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef KeyValuePair<difference_type, OutputValueT> value_type; ///< The type of the element the iterator can point to
typedef value_type* pointer; ///< The type of a pointer to an element the iterator can point to
typedef value_type reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
xgboost/cub/cub/iterator/arg_index_input_iterator.cuh view on Meta::CPAN
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
InputIteratorT itr;
difference_type offset;
public:
/// Constructor: wraps \p itr, positioned \p offset items beyond it
/// (offset defaults to 0, i.e. the wrapped iterator's own position).
__host__ __device__ __forceinline__ ArgIndexInputIterator(
InputIteratorT itr, ///< Input iterator to wrap
difference_type offset = 0) ///< OffsetT (in items) from \p itr denoting the position of the iterator
:
itr(itr),
offset(offset)
{}
xgboost/cub/cub/iterator/cache_modified_input_iterator.cuh view on Meta::CPAN
* \tparam CacheLoadModifier The cub::CacheLoadModifier to use when accessing data
* \tparam ValueType The value type of this iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
CacheLoadModifier MODIFIER,
typename ValueType,
typename OffsetT = ptrdiff_t>
class CacheModifiedInputIterator
{
public:
// Required iterator traits
typedef CacheModifiedInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef ValueType value_type; ///< The type of the element the iterator can point to
typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef ValueType reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
xgboost/cub/cub/iterator/cache_modified_input_iterator.cuh view on Meta::CPAN
thrust::device_system_tag,
thrust::random_access_traversal_tag,
value_type,
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
public:
/// Wrapped native pointer
ValueType* ptr;
/// Constructor
template <typename QualifiedValueType>
__host__ __device__ __forceinline__ CacheModifiedInputIterator(
QualifiedValueType* ptr) ///< Native pointer to wrap
:
ptr(const_cast<typename RemoveQualifiers<QualifiedValueType>::Type *>(ptr))
xgboost/cub/cub/iterator/cache_modified_output_iterator.cuh view on Meta::CPAN
__host__ __device__ __forceinline__ Reference(ValueType* ptr) : ptr(ptr) {}
/// Assignment: writes \p val through the wrapped pointer with a
/// cache-modified ThreadStore<MODIFIER>, then returns \p val by value.
__device__ __forceinline__ ValueType operator =(ValueType val)
{
ThreadStore<MODIFIER>(ptr, val);
return val;
}
};
public:
// Required iterator traits
typedef CacheModifiedOutputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef void value_type; ///< The type of the element the iterator can point to
typedef void pointer; ///< The type of a pointer to an element the iterator can point to
typedef Reference reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
xgboost/cub/cub/iterator/cache_modified_output_iterator.cuh view on Meta::CPAN
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
ValueType* ptr;
public:
/// Constructor: wraps the native pointer \p ptr, stripping its
/// cv-qualifiers (RemoveQualifiers + const_cast) so stores can be
/// issued through the stored pointer.
template <typename QualifiedValueType>
__host__ __device__ __forceinline__ CacheModifiedOutputIterator(
QualifiedValueType* ptr) ///< Native pointer to wrap
:
ptr(const_cast<typename RemoveQualifiers<QualifiedValueType>::Type *>(ptr))
{}
/// Postfix increment
xgboost/cub/cub/iterator/constant_input_iterator.cuh view on Meta::CPAN
* \endcode
*
* \tparam ValueType The value type of this iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
typename ValueType,
typename OffsetT = ptrdiff_t>
class ConstantInputIterator
{
public:
// Required iterator traits
typedef ConstantInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef ValueType value_type; ///< The type of the element the iterator can point to
typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef ValueType reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
xgboost/cub/cub/iterator/constant_input_iterator.cuh view on Meta::CPAN
#endif // THRUST_VERSION
private:
ValueType val;
OffsetT offset;
#ifdef _WIN32
OffsetT pad[CUB_MAX(1, (16 / sizeof(OffsetT) - 1))]; // Workaround for win32 parameter-passing bug (ulonglong2 argmin DeviceReduce)
#endif
public:
/// Constructor: \p val is the constant value this iterator reports and
/// \p offset its base position.  (Dereference/comparison operators are
/// outside this excerpt — presumably offset only affects position, not
/// the reported value; confirm against the full class.)
__host__ __device__ __forceinline__ ConstantInputIterator(
ValueType val, ///< Starting value for the iterator instance to report
OffsetT offset = 0) ///< Base offset
:
val(val),
offset(offset)
{}
xgboost/cub/cub/iterator/counting_input_iterator.cuh view on Meta::CPAN
* \endcode
*
* \tparam ValueType The value type of this iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
typename ValueType,
typename OffsetT = ptrdiff_t>
class CountingInputIterator
{
public:
// Required iterator traits
typedef CountingInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef ValueType value_type; ///< The type of the element the iterator can point to
typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef ValueType reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
xgboost/cub/cub/iterator/counting_input_iterator.cuh view on Meta::CPAN
reference
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
ValueType val;
public:
/// Constructor: \p val is the starting value the iterator instance reports.
__host__ __device__ __forceinline__ CountingInputIterator(
const ValueType &val) ///< Starting value for the iterator instance to report
:
val(val)
{}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
xgboost/cub/cub/iterator/discard_output_iterator.cuh view on Meta::CPAN
* @{
*/
/**
* \brief A discard iterator
*/
template <typename OffsetT = ptrdiff_t>
class DiscardOutputIterator
{
public:
// Required iterator traits
typedef DiscardOutputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef void value_type; ///< The type of the element the iterator can point to
typedef void pointer; ///< The type of a pointer to an element the iterator can point to
typedef void reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
xgboost/cub/cub/iterator/discard_output_iterator.cuh view on Meta::CPAN
private:
OffsetT offset;
#if defined(_WIN32) || !defined(_WIN64)
// Workaround for win32 parameter-passing bug (ulonglong2 argmin DeviceReduce)
OffsetT pad[CUB_MAX(1, (16 / sizeof(OffsetT) - 1))];
#endif
public:
/// Constructor: \p offset sets the base position; values written through
/// this iterator are discarded (see class brief above).
__host__ __device__ __forceinline__ DiscardOutputIterator(
OffsetT offset = 0) ///< Base offset
:
offset(offset)
{}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
xgboost/cub/cub/iterator/tex_obj_input_iterator.cuh view on Meta::CPAN
* \endcode
*
* \tparam T The value type of this iterator
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
typename T,
typename OffsetT = ptrdiff_t>
class TexObjInputIterator
{
public:
// Required iterator traits
typedef TexObjInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef T value_type; ///< The type of the element the iterator can point to
typedef T* pointer; ///< The type of a pointer to an element the iterator can point to
typedef T reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
xgboost/cub/cub/iterator/tex_obj_input_iterator.cuh view on Meta::CPAN
enum {
TEXTURE_MULTIPLE = sizeof(T) / sizeof(TextureWord)
};
private:
T* ptr;
difference_type tex_offset;
cudaTextureObject_t tex_obj;
public:
/// Default constructor: produces an unbound iterator (NULL pointer, zero
/// texture offset, zero texture object); bind it to data via the texture
/// binding method below before use.
__host__ __device__ __forceinline__ TexObjInputIterator()
:
ptr(NULL),
tex_offset(0),
tex_obj(0)
{}
/// Use this iterator to bind \p ptr with a texture reference
xgboost/cub/cub/iterator/tex_ref_input_iterator.cuh view on Meta::CPAN
* \tparam T The value type of this iterator
* \tparam UNIQUE_ID A globally-unique identifier (within the compilation unit) to name the underlying texture reference
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*/
template <
typename T,
int UNIQUE_ID,
typename OffsetT = ptrdiff_t>
class TexRefInputIterator
{
public:
// Required iterator traits
typedef TexRefInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef T value_type; ///< The type of the element the iterator can point to
typedef T* pointer; ///< The type of a pointer to an element the iterator can point to
typedef T reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
xgboost/cub/cub/iterator/tex_ref_input_iterator.cuh view on Meta::CPAN
#endif // THRUST_VERSION
private:
T* ptr;
difference_type tex_offset;
// Texture reference wrapper (old Tesla/Fermi-style textures)
typedef typename IteratorTexRef<T>::template TexId<UNIQUE_ID> TexId;
public:
/*
/// Constructor
__host__ __device__ __forceinline__ TexRefInputIterator()
:
ptr(NULL),
tex_offset(0)
{}
*/
/// Use this iterator to bind \p ptr with a texture reference
template <typename QualifiedT>
xgboost/cub/cub/iterator/transform_input_iterator.cuh view on Meta::CPAN
* \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t)
*
*/
template <
typename ValueType,
typename ConversionOp,
typename InputIteratorT,
typename OffsetT = ptrdiff_t>
class TransformInputIterator
{
public:
// Required iterator traits
typedef TransformInputIterator self_type; ///< My own type
typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef ValueType value_type; ///< The type of the element the iterator can point to
typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef ValueType reference; ///< The type of a reference to an element the iterator can point to
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
xgboost/cub/cub/iterator/transform_input_iterator.cuh view on Meta::CPAN
>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
private:
ConversionOp conversion_op;
InputIteratorT input_itr;
public:
/// Constructor: wraps \p input_itr together with the conversion functor
/// \p conversion_op that is applied to the wrapped iterator's values.
__host__ __device__ __forceinline__ TransformInputIterator(
InputIteratorT input_itr, ///< Input iterator to wrap
ConversionOp conversion_op) ///< Conversion functor to wrap
:
conversion_op(conversion_op),
input_itr(input_itr)
{}
xgboost/cub/cub/thread/thread_operators.cuh view on Meta::CPAN
* \brief Binary operator wrapper for switching non-commutative scan arguments
*/
template <typename ScanOp>
class SwizzleScanOp
{
private:
/// Wrapped scan operator
ScanOp scan_op;
public:
/// Constructor: wraps \p scan_op, whose argument order the wrapper's
/// operator() switches (see class brief above)
__host__ __device__ __forceinline__
SwizzleScanOp(ScanOp scan_op) : scan_op(scan_op) {}
/// Switch the scan arguments
template <typename T>
__host__ __device__ __forceinline__
T operator()(const T &a, const T &b)
{
xgboost/cub/cub/util_allocator.cuh view on Meta::CPAN
return (a.bytes < b.bytes);
else
return (a.device < b.device);
}
};
/// BlockDescriptor comparator function interface
typedef bool (*Compare)(const BlockDescriptor &, const BlockDescriptor &);
/// Running byte totals for the allocator's block sets.
class TotalBytes {
public:
size_t free;    ///< Bytes in cached blocks — NOTE(review): inferred from the adjacent CachedBlocks set; confirm
size_t live;    ///< Bytes in live blocks — NOTE(review): inferred from the adjacent BusyBlocks set; confirm
// Member-initializer list instead of default-construction-then-assignment
// in the body (C++ Core Guidelines C.49).
TotalBytes() : free(0), live(0) {}
};
/// Set type for cached blocks (ordered by size)
typedef std::multiset<BlockDescriptor, Compare> CachedBlocks;
/// Set type for live blocks (ordered by ptr)
typedef std::multiset<BlockDescriptor, Compare> BusyBlocks;
xgboost/cub/cub/util_type.cuh view on Meta::CPAN
template <typename BinaryOpT> static char Test(SFINAE3<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE4<BinaryOpT, &BinaryOpT::operator()> *);
*/
template <typename BinaryOpT> static char Test(SFINAE5<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE6<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE7<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE8<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static int Test(...);
public:
/// Whether the functor BinaryOp has a third <tt>unsigned int</tt> index param
static const bool HAS_PARAM = sizeof(Test<BinaryOp>(NULL)) == sizeof(char);
};
/******************************************************************************
* Simple type traits utilities.