// Excerpt from the Alien-XGBoost distribution:
//   xgboost/cub/cub/block/specializations/block_histogram_sort.cuh
// (viewed via MetaCPAN; surrounding class template header is above this chunk)
};
/// Alias wrapper allowing storage to be unioned with other per-block storage
/// (Uninitialized<> suppresses default construction of the shared struct)
struct TempStorage : Uninitialized<_TempStorage> {};
// Thread fields
_TempStorage &temp_storage;   // Reference to this block's shared working storage
unsigned int linear_tid;      // This thread's linear rank within the thread block
/// Constructor
/// Constructor.  Binds the caller-provided shared storage and computes this
/// thread's linear rank from the (X,Y,Z) block dimensions (RowMajorTid —
/// presumably a row-major linearization; defined elsewhere in CUB).
__device__ __forceinline__ BlockHistogramSort(
TempStorage &temp_storage)    ///< [in] Reference to this block's shared TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
// Discontinuity functor
// Functor that detects boundaries between runs of identical bin values in a
// sorted tile.  As a side effect it records each run's begin/end offsets in
// shared storage: at a boundary, b_index is simultaneously the first offset
// of bin b's run and the one-past-the-end offset of bin a's run.
struct DiscontinuityOp
{
    // Shared working storage in which run offsets are recorded
    _TempStorage &temp_storage;

    // Constructor: bind the block's shared storage
    __device__ __forceinline__ DiscontinuityOp(_TempStorage &temp_storage) :
        temp_storage(temp_storage)
    {}

    // Returns true when b begins a new run (differs from its predecessor a)
    __device__ __forceinline__ bool operator()(const T &a, const T &b, int b_index)
    {
        // Same bin as the predecessor: not a run head, nothing to record
        if (a == b)
            return false;

        // Note the begin/end offsets of the two adjacent runs
        temp_storage.run_begin[b] = b_index;
        temp_storage.run_end[a] = b_index;
        return true;
    }
};
// Composite data onto an existing histogram.
// Strategy: block-sort the tile so equal bin values become contiguous, then
// each bin's count is simply (run_end - run_begin) of its sorted run.
// Collective: every one of the block's threads must call this (contains
// block-wide sort/discontinuity operations and CTA_SYNC barriers).
template <
typename CounterT >
__device__ __forceinline__ void Composite(
T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram
CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram
{
// Total number of samples contributed by the block this call
enum { TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD };
// Sort bytes in blocked arrangement
BlockRadixSortT(temp_storage.sort).Sort(items);
CTA_SYNC();
// Initialize the shared memory's run_begin and run_end for each bin.
// TILE_SIZE acts as a sentinel: a bin that never appears keeps
// run_begin == run_end == TILE_SIZE and so contributes a count of 0.
int histo_offset = 0;
#pragma unroll
for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS)
{
temp_storage.run_begin[histo_offset + linear_tid] = TILE_SIZE;
temp_storage.run_end[histo_offset + linear_tid] = TILE_SIZE;
}
// Finish up with guarded initialization if necessary (BINS not a multiple
// of BLOCK_THREADS)
if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS))
{
temp_storage.run_begin[histo_offset + linear_tid] = TILE_SIZE;
temp_storage.run_end[histo_offset + linear_tid] = TILE_SIZE;
}
CTA_SYNC();
int flags[ITEMS_PER_THREAD]; // unused
// Compute head flags to demarcate contiguous runs of the same bin in the
// sorted tile; the functor's side effect records each run's begin/end
// offsets in shared storage (the head flags themselves are discarded)
DiscontinuityOp flag_op(temp_storage);
BlockDiscontinuityT(temp_storage.flag).FlagHeads(flags, items, flag_op);
// Update begin for first item: the tile's first sample has no predecessor
// for the functor to compare against, so set its bin's begin explicitly
// (its run_end is either set by a later boundary or stays at TILE_SIZE)
if (linear_tid == 0) temp_storage.run_begin[items[0]] = 0;
CTA_SYNC();
// Composite into histogram: each thread accumulates run lengths for the
// bins it owns (bin index = histo_offset + linear_tid)
histo_offset = 0;
#pragma unroll
for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS)
{
int thread_offset = histo_offset + linear_tid;
CounterT count = temp_storage.run_end[thread_offset] - temp_storage.run_begin[thread_offset];
histogram[thread_offset] += count;
}
// Finish up with guarded composition if necessary
if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS))
{
int thread_offset = histo_offset + linear_tid;
CounterT count = temp_storage.run_end[thread_offset] - temp_storage.run_begin[thread_offset];
histogram[thread_offset] += count;
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
// ( run in 2.742 seconds using v1.01-cache-2.11-cpan-39bf76dae61 )