view release on metacpan or search on metacpan
xgboost/R-package/configure view on Meta::CPAN
else
as_fn_arith ()
{
as_val=`expr "$@" || test $? -eq 1`
}
fi # as_fn_arith
# as_fn_error STATUS ERROR [LINENO LOG_FD]
# ----------------------------------------
# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
# script with STATUS, using 1 if that was 0.
as_fn_error ()
{
as_status=$1; test $as_status -eq 0 && as_status=1
if test "$4"; then
as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
$as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
fi
$as_echo "$as_me: error: $2" >&2
xgboost/R-package/configure view on Meta::CPAN
#
# `ac_cv_env_foo' variables (set or unset) will be overridden when
# loading this file, other *unset* `ac_cv_foo' will be assigned the
# following values.
_ACEOF
# The following way of writing the cache mishandles newlines in values,
# but we know of no workaround that is simple, portable, and efficient.
# So, we kill variables containing newlines.
# Ultrix sh set writes to stderr and can't be redirected directly,
# and sets the high bit in the cache file unless we assign to the vars.
(
for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
eval ac_val=\$$ac_var
case $ac_val in #(
*${as_nl}*)
case $ac_var in #(
*_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
esac
xgboost/R-package/configure view on Meta::CPAN
export LC_ALL
LANGUAGE=C
export LANGUAGE
# CDPATH.
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
# as_fn_error STATUS ERROR [LINENO LOG_FD]
# ----------------------------------------
# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
# script with STATUS, using 1 if that was 0.
as_fn_error ()
{
as_status=$1; test $as_status -eq 0 && as_status=1
if test "$4"; then
as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
$as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
fi
$as_echo "$as_me: error: $2" >&2
xgboost/cub/cub/util_allocator.cuh view on Meta::CPAN
/**
* \brief Constructor.
*/
CachingDeviceAllocator(
unsigned int bin_growth, ///< Geometric growth factor for bin-sizes
unsigned int min_bin = 1, ///< Minimum bin (default is bin_growth ^ 1)
unsigned int max_bin = INVALID_BIN, ///< Maximum bin (default is no max bin)
size_t max_cached_bytes = INVALID_SIZE, ///< Maximum aggregate cached bytes per device (default is no limit)
bool skip_cleanup = false, ///< Whether or not to skip a call to \p FreeAllCached() when the destructor is called (default is to deallocate)
bool debug = false) ///< Whether or not to print (de)allocation events to stdout (default is no stderr output)
:
bin_growth(bin_growth),
min_bin(min_bin),
max_bin(max_bin),
min_bin_bytes(IntPow(bin_growth, min_bin)),
max_bin_bytes(IntPow(bin_growth, max_bin)),
max_cached_bytes(max_cached_bytes),
skip_cleanup(skip_cleanup),
debug(debug),
cached_blocks(BlockDescriptor::SizeCompare),
xgboost/cub/cub/util_debug.cuh view on Meta::CPAN
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilMgmt
* @{
*/
/// CUB error reporting macro (prints error messages to stderr)
#if (defined(DEBUG) || defined(_DEBUG)) && !defined(CUB_STDERR)
#define CUB_STDERR
#endif
/**
* \brief %If \p CUB_STDERR is defined and \p error is not \p cudaSuccess, the corresponding error message is printed to \p stderr (or \p stdout in device code) along with the supplied source context.
*
* \return The CUDA error.
*/
__host__ __device__ __forceinline__ cudaError_t Debug(
cudaError_t error,      // CUDA error code to inspect (cudaSuccess is silent)
const char* filename,   // source file of the call site, for the message
int line)               // source line of the call site, for the message
{
// Silence unused-parameter warnings when CUB_STDERR is not defined
// (the only uses of filename/line are inside the #ifdef below).
(void)filename;
(void)line;
#ifdef CUB_STDERR
// Any non-zero cudaError_t is treated as a failure worth reporting.
if (error)
{
#if (CUB_PTX_ARCH == 0)
// Host compilation pass: report to stderr and flush immediately so the
// message is not lost if the process aborts right after.
fprintf(stderr, "CUDA error %d [%s, %d]: %s\n", error, filename, line, cudaGetErrorString(error));
fflush(stderr);
#elif (CUB_PTX_ARCH >= 200)
// Device compilation pass (SM 2.0+ supports device-side printf). Only the
// numeric error code is printed: cudaGetErrorString is host-only.
printf("CUDA error %d [block (%d,%d,%d) thread (%d,%d,%d), %s, %d]\n", error, blockIdx.z, blockIdx.y, blockIdx.x, threadIdx.z, threadIdx.y, threadIdx.x, filename, line);
#endif
}
#endif
// Always pass the error through so call sites can wrap API calls:
// e.g. `if (Debug(cudaMalloc(...), __FILE__, __LINE__)) ...`
return error;
}
/**
xgboost/cub/experimental/defunct/test_device_seg_reduce.cu view on Meta::CPAN
// Clear device output
CubDebugExit(cudaMemset(d_output, 0, sizeof(Value) * num_segments));
// Run warmup/correctness iteration
CubDebugExit(DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, 0, true));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_output, num_segments, true, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
for (int i = 0; i < g_timing_iterations; ++i)
{
CubDebugExit(DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, 0, false));
}
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
xgboost/cub/experimental/histogram_compare.cu view on Meta::CPAN
/**
* Reads a .tga image file
*/
void ReadTga(uchar4* &pixels, int &width, int &height, const char *filename)
{
// Open the file
FILE *fptr;
if ((fptr = fopen(filename, "rb")) == NULL)
{
fprintf(stderr, "File open failed\n");
exit(-1);
}
// Parse header
TgaHeader header;
header.Parse(fptr);
// header.Display(stdout);
width = header.width;
height = header.height;
// Verify compatibility
if (header.datatypecode != 2 && header.datatypecode != 10)
{
fprintf(stderr, "Can only handle image type 2 and 10\n");
exit(-1);
}
if (header.bitsperpixel != 16 && header.bitsperpixel != 24 && header.bitsperpixel != 32)
{
fprintf(stderr, "Can only handle pixel depths of 16, 24, and 32\n");
exit(-1);
}
if (header.colormaptype != 0 && header.colormaptype != 1)
{
fprintf(stderr, "Can only handle color map types of 0 and 1\n");
exit(-1);
}
// Skip unnecessary header info
int skip_bytes = header.idlength + (header.colormaptype * header.colormaplength);
fseek(fptr, skip_bytes, SEEK_CUR);
// Read the image
int pixel_bytes = header.bitsperpixel / 8;
// Allocate and initialize pixel data
size_t image_bytes = width * height * sizeof(uchar4);
if ((pixels == NULL) && ((pixels = (uchar4*) malloc(image_bytes)) == NULL))
{
fprintf(stderr, "malloc of image failed\n");
exit(-1);
}
memset(pixels, 0, image_bytes);
// Parse pixels
unsigned char tga_pixel[5];
int current_pixel = 0;
while (current_pixel < header.width * header.height)
{
if (header.datatypecode == 2)
{
// Uncompressed
if (fread(tga_pixel, 1, pixel_bytes, fptr) != pixel_bytes)
{
fprintf(stderr, "Unexpected end of file at pixel %d (uncompressed)\n", current_pixel);
exit(-1);
}
ParseTgaPixel(pixels[current_pixel], tga_pixel, pixel_bytes);
current_pixel++;
}
else if (header.datatypecode == 10)
{
// Compressed
if (fread(tga_pixel, 1, pixel_bytes + 1, fptr) != pixel_bytes + 1)
{
fprintf(stderr, "Unexpected end of file at pixel %d (compressed)\n", current_pixel);
exit(-1);
}
int run_length = tga_pixel[0] & 0x7f;
ParseTgaPixel(pixels[current_pixel], &(tga_pixel[1]), pixel_bytes);
current_pixel++;
if (tga_pixel[0] & 0x80)
{
// RLE chunk
for (int i = 0; i < run_length; i++)
xgboost/cub/experimental/histogram_compare.cu view on Meta::CPAN
current_pixel++;
}
}
else
{
// Normal chunk
for (int i = 0; i < run_length; i++)
{
if (fread(tga_pixel, 1, pixel_bytes, fptr) != pixel_bytes)
{
fprintf(stderr, "Unexpected end of file at pixel %d (normal)\n", current_pixel);
exit(-1);
}
ParseTgaPixel(pixels[current_pixel], tga_pixel, pixel_bytes);
current_pixel++;
}
}
}
}
// Close file
xgboost/cub/experimental/histogram_compare.cu view on Meta::CPAN
/**
* Generate a random image with specified entropy
*/
void GenerateRandomImage(uchar4* &pixels, int width, int height, int entropy_reduction)
{
int num_pixels = width * height;
size_t image_bytes = num_pixels * sizeof(uchar4);
if ((pixels == NULL) && ((pixels = (uchar4*) malloc(image_bytes)) == NULL))
{
fprintf(stderr, "malloc of image failed\n");
exit(-1);
}
for (int i = 0; i < num_pixels; ++i)
{
RandomBits(pixels[i].x, entropy_reduction);
RandomBits(pixels[i].y, entropy_reduction);
RandomBits(pixels[i].z, entropy_reduction);
RandomBits(pixels[i].w, entropy_reduction);
}
xgboost/cub/experimental/sparse_matrix.h view on Meta::CPAN
/**
* Builds a symmetric COO sparse from an asymmetric CSR matrix.
*/
template <typename CsrMatrixT>
void InitCsrSymmetric(CsrMatrixT &csr_matrix)
{
if (coo_tuples)
{
fprintf(stderr, "Matrix already constructed\n");
exit(1);
}
num_rows = csr_matrix.num_cols;
num_cols = csr_matrix.num_rows;
num_nonzeros = csr_matrix.num_nonzeros * 2;
coo_tuples = new CooTuple[num_nonzeros];
for (OffsetT row = 0; row < csr_matrix.num_rows; ++row)
{
xgboost/cub/experimental/sparse_matrix.h view on Meta::CPAN
}
/**
* Builds a COO sparse from a relabeled CSR matrix.
*/
template <typename CsrMatrixT>
void InitCsrRelabel(CsrMatrixT &csr_matrix, OffsetT* relabel_indices)
{
if (coo_tuples)
{
fprintf(stderr, "Matrix already constructed\n");
exit(1);
}
num_rows = csr_matrix.num_rows;
num_cols = csr_matrix.num_cols;
num_nonzeros = csr_matrix.num_nonzeros;
coo_tuples = new CooTuple[num_nonzeros];
for (OffsetT row = 0; row < num_rows; ++row)
{
xgboost/cub/experimental/sparse_matrix.h view on Meta::CPAN
/**
 * Builds a METIS COO sparse matrix from the given file.
 * Aborts the process if this matrix object was already populated.
 */
void InitMetis(const string &metis_filename)
{
    // A non-NULL tuple array means some other Init* routine already ran;
    // refuse to overwrite it.
    if (coo_tuples != NULL)
    {
        fprintf(stderr, "Matrix already constructed\n");
        exit(1);
    }

    // TODO: METIS file parsing is not implemented yet.
}
/**
* Builds a MARKET COO sparse from the given file.
*/
xgboost/cub/experimental/sparse_matrix.h view on Meta::CPAN
const string& market_filename,
ValueT default_value = 1.0,
bool verbose = false)
{
if (verbose) {
printf("Reading... "); fflush(stdout);
}
if (coo_tuples)
{
fprintf(stderr, "Matrix already constructed\n");
exit(1);
}
std::ifstream ifs;
ifs.open(market_filename.c_str(), std::ifstream::in);
if (!ifs.good())
{
fprintf(stderr, "Error opening file\n");
exit(1);
}
bool array = false;
bool symmetric = false;
bool skew = false;
int current_edge = -1;
char line[1024];
if (verbose) {
xgboost/cub/experimental/sparse_matrix.h view on Meta::CPAN
}
else if (array && (nparsed == 2))
{
// Allocate coo matrix
num_nonzeros = num_rows * num_cols;
coo_tuples = new CooTuple[num_nonzeros];
current_edge = 0;
}
else
{
fprintf(stderr, "Error parsing MARKET matrix: invalid problem description: %s\n", line);
exit(1);
}
}
else
{
// Edge
if (current_edge >= num_nonzeros)
{
fprintf(stderr, "Error parsing MARKET matrix: encountered more than %d num_nonzeros\n", num_nonzeros);
exit(1);
}
int row, col;
double val;
if (array)
{
if (sscanf(line, "%lf", &val) != 1)
{
fprintf(stderr, "Error parsing MARKET matrix: badly formed current_edge: '%s' at edge %d\n", line, current_edge);
exit(1);
}
col = (current_edge / num_rows);
row = (current_edge - (num_rows * col));
coo_tuples[current_edge] = CooTuple(row, col, val); // Convert indices to zero-based
}
else
{
// Parse nonzero (note: using strtol and strtod is 2x faster than sscanf or istream parsing)
char *l = line;
char *t = NULL;
// parse row
row = strtol(l, &t, 0);
if (t == l)
{
fprintf(stderr, "Error parsing MARKET matrix: badly formed row at edge %d\n", current_edge);
exit(1);
}
l = t;
// parse col
col = strtol(l, &t, 0);
if (t == l)
{
fprintf(stderr, "Error parsing MARKET matrix: badly formed col at edge %d\n", current_edge);
exit(1);
}
l = t;
// parse val
val = strtod(l, &t);
if (t == l)
{
val = default_value;
}
/*
int nparsed = sscanf(line, "%d %d %lf", &row, &col, &val);
if (nparsed == 2)
{
// No value specified
val = default_value;
}
else if (nparsed != 3)
{
fprintf(stderr, "Error parsing MARKET matrix 1: badly formed current_edge: %d parsed at edge %d\n", nparsed, current_edge);
exit(1);
}
*/
coo_tuples[current_edge] = CooTuple(row - 1, col - 1, val); // Convert indices to zero-based
}
current_edge++;
xgboost/cub/experimental/sparse_matrix.h view on Meta::CPAN
* Builds a dense matrix
*/
int InitDense(
OffsetT num_rows,
OffsetT num_cols,
ValueT default_value = 1.0,
bool verbose = false)
{
if (coo_tuples)
{
fprintf(stderr, "Matrix already constructed\n");
exit(1);
}
this->num_rows = num_rows;
this->num_cols = num_cols;
num_nonzeros = num_rows * num_cols;
coo_tuples = new CooTuple[num_nonzeros];
for (OffsetT row = 0; row < num_rows; ++row)
xgboost/cub/experimental/sparse_matrix.h view on Meta::CPAN
/**
* Builds a wheel COO sparse matrix having spokes spokes.
*/
int InitWheel(
OffsetT spokes,
ValueT default_value = 1.0,
bool verbose = false)
{
if (coo_tuples)
{
fprintf(stderr, "Matrix already constructed\n");
exit(1);
}
num_rows = spokes + 1;
num_cols = num_rows;
num_nonzeros = spokes * 2;
coo_tuples = new CooTuple[num_nonzeros];
// Add spoke num_nonzeros
int current_edge = 0;
xgboost/cub/experimental/sparse_matrix.h view on Meta::CPAN
/**
* Builds a square 2D grid CSR matrix. Interior num_vertices have degree 5 when including
* a self-loop.
*
* Returns 0 on success, 1 on failure.
*/
int InitGrid2d(OffsetT width, bool self_loop, ValueT default_value = 1.0)
{
if (coo_tuples)
{
fprintf(stderr, "Matrix already constructed\n");
exit(1);
}
int interior_nodes = (width - 2) * (width - 2);
int edge_nodes = (width - 2) * 4;
int corner_nodes = 4;
num_rows = width * width;
num_cols = num_rows;
num_nonzeros = (interior_nodes * 4) + (edge_nodes * 3) + (corner_nodes * 2);
xgboost/cub/experimental/sparse_matrix.h view on Meta::CPAN
/**
* Builds a square 3D grid COO sparse matrix. Interior num_vertices have degree 7 when including
* a self-loop. Values are unintialized, coo_tuples are sorted.
*/
int InitGrid3d(OffsetT width, bool self_loop, ValueT default_value = 1.0)
{
if (coo_tuples)
{
fprintf(stderr, "Matrix already constructed\n");
return -1;
}
OffsetT interior_nodes = (width - 2) * (width - 2) * (width - 2);
OffsetT face_nodes = (width - 2) * (width - 2) * 6;
OffsetT edge_nodes = (width - 2) * 12;
OffsetT corner_nodes = 8;
num_cols = width * width * width;
num_rows = num_cols;
num_nonzeros = (interior_nodes * 6) + (face_nodes * 5) + (edge_nodes * 4) + (corner_nodes * 3);
xgboost/cub/experimental/spmv_compare.cu view on Meta::CPAN
// Generate dense graph
OffsetT size = 1 << 24; // 16M nnz
args.GetCmdLineArgument("size", size);
OffsetT rows = size / dense;
printf("dense_%d_x_%d, ", rows, dense); fflush(stdout);
coo_matrix.InitDense(rows, dense);
}
else
{
fprintf(stderr, "No graph type specified.\n");
exit(1);
}
RunTest(
rcm_relabel,
alpha,
beta,
coo_matrix,
timing_iterations,
args);
xgboost/cub/test/test_allocator.cu view on Meta::CPAN
// Check that that still we have 0 live block across all GPUs
AssertEquals(allocator.live_blocks.size(), 0);
}
#endif // CUB_CDP
//
// Performance
//
printf("\nCPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes);
fflush(stdout); fflush(stderr);
// CPU performance comparisons vs cached. Allocate and free a 1MB block 2000 times
CpuTimer cpu_timer;
char *d_1024MB = NULL;
allocator.debug = false;
// Prime the caching allocator and the kernel
CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
CubDebugExit(allocator.DeviceFree(d_1024MB));
cub::EmptyKernel<void><<<1, 32>>>();
xgboost/cub/test/test_allocator.cu view on Meta::CPAN
printf("\t CUB CachingDeviceAllocator allocation CPU speedup: %.2f (avg cudaMalloc %.4f ms vs. avg DeviceAllocate %.4f ms)\n",
cuda_malloc_elapsed_millis / cub_calloc_elapsed_millis,
cuda_malloc_elapsed_millis / timing_iterations,
cub_calloc_elapsed_millis / timing_iterations);
// GPU performance comparisons. Allocate and free a 1MB block 2000 times
GpuTimer gpu_timer;
printf("\nGPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes);
fflush(stdout); fflush(stderr);
// Kernel-only
gpu_timer.Start();
for (int i = 0; i < timing_iterations; ++i)
{
cub::EmptyKernel<void><<<1, 32>>>();
}
gpu_timer.Stop();
float cuda_empty_elapsed_millis = gpu_timer.ElapsedMillis();
xgboost/cub/test/test_block_histogram.cu view on Meta::CPAN
// Run kernel
BlockHistogramKernel<BINS, BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<1, BLOCK_THREADS>>>(
d_samples,
d_histogram);
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults((int*) h_reference, d_histogram, BINS, g_verbose, g_verbose);
printf("\t%s\n\n", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Cleanup
if (h_samples) delete[] h_samples;
if (h_reference) delete[] h_reference;
if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
if (d_histogram) CubDebugExit(g_allocator.DeviceFree(d_histogram));
// Correctness asserts
AssertEquals(0, compare);
}
xgboost/cub/test/test_device_histogram.cu view on Meta::CPAN
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Check canary zones
int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
AssertEquals(0, error);
error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
AssertEquals(0, error);
// Flush any stdout/stderr
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
error |= channel_error;
}
// Performance
xgboost/cub/test/test_device_histogram.cu view on Meta::CPAN
d_samples, d_histogram, num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Check canary zones
int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
AssertEquals(0, error);
error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
AssertEquals(0, error);
// Flush any stdout/stderr
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
error |= channel_error;
}
// Performance
xgboost/cub/test/test_device_radix_sort.cu view on Meta::CPAN
}
CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(int) * (num_segments + 1), cudaMemcpyHostToDevice));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(
Int2Type<IS_DESCENDING>(), Int2Type<BACKEND>(), d_selector, d_temp_storage_bytes, d_cdp_error,
mis_aligned_temp, temp_storage_bytes, d_keys, d_values,
num_items, num_segments, d_segment_offsets,
begin_bit, end_bit, 0, true));
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
printf("Warmup done. Checking results:\n"); fflush(stdout);
int compare = CompareDeviceResults(h_reference_keys, d_keys.Current(), num_items, true, g_verbose);
printf("\t Compare keys (selector %d): %s ", d_keys.selector, compare ? "FAIL" : "PASS"); fflush(stdout);
if (!KEYS_ONLY)
{
int values_compare = CompareDeviceResults(h_reference_values, d_values.Current(), num_items, true, g_verbose);
compare |= values_compare;
printf("\t Compare values (selector %d): %s ", d_values.selector, values_compare ? "FAIL" : "PASS"); fflush(stdout);
xgboost/cub/test/test_device_reduce.cu view on Meta::CPAN
// Run warmup/correctness iteration
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, true));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
if (g_timing_iterations > 0)
{
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(backend, g_timing_iterations,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
xgboost/cub/test/test_device_reduce_by_key.cu view on Meta::CPAN
// Check for correctness (and display results, if specified)
int compare1 = CompareDeviceResults(h_keys_reference, d_keys_out, num_segments, true, g_verbose);
printf("\t Keys %s ", compare1 ? "FAIL" : "PASS");
int compare2 = CompareDeviceResults(h_values_reference, d_values_out, num_segments, true, g_verbose);
printf("\t Values %s ", compare2 ? "FAIL" : "PASS");
int compare3 = CompareDeviceResults(&num_segments, d_num_runs, 1, true, g_verbose);
printf("\t Count %s ", compare3 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_runs, equality_op, reduction_op, num_items, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
int bytes_moved = ((num_items + num_segments) * sizeof(KeyT)) + ((num_items + num_segments) * sizeof(ValueT));
float giga_bandwidth = float(bytes_moved) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s", avg_millis, giga_rate, giga_bandwidth);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_keys_out) CubDebugExit(g_allocator.DeviceFree(d_keys_out));
if (d_values_out) CubDebugExit(g_allocator.DeviceFree(d_values_out));
if (d_num_runs) CubDebugExit(g_allocator.DeviceFree(d_num_runs));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
xgboost/cub/test/test_device_run_length_encode.cu view on Meta::CPAN
if (RLE_METHOD != CSR)
{
compare2 = CompareDeviceResults(h_lengths_reference, d_lengths_out, num_runs, true, g_verbose);
printf("\t Lengths %s\n", compare2 ? "FAIL" : "PASS");
}
compare3 = CompareDeviceResults(&num_runs, d_num_runs, 1, true, g_verbose);
printf("\t Count %s\n", compare3 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<RLE_METHOD>(), Int2Type<BACKEND>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_offsets_out, d_lengths_out, d_num_runs, equality_op, num_items, 0...
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
int bytes_moved = (num_items * sizeof(T)) + (num_runs * (sizeof(OffsetT) + sizeof(LengthT)));
float giga_bandwidth = float(bytes_moved) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s", avg_millis, giga_rate, giga_bandwidth);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_unique_out) CubDebugExit(g_allocator.DeviceFree(d_unique_out));
if (d_offsets_out) CubDebugExit(g_allocator.DeviceFree(d_offsets_out));
if (d_lengths_out) CubDebugExit(g_allocator.DeviceFree(d_lengths_out));
if (d_num_runs) CubDebugExit(g_allocator.DeviceFree(d_num_runs));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
xgboost/cub/test/test_device_scan.cu view on Meta::CPAN
scan_op,
initial_value,
num_items,
0,
true));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(),
Int2Type<Traits<OutputT>::PRIMITIVE>(),
g_timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
xgboost/cub/test/test_device_select_if.cu view on Meta::CPAN
// Check for correctness (and display results, if specified)
int compare1 = (IS_PARTITION) ?
CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose) :
CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose);
printf("\t Data %s\n", compare1 ? "FAIL" : "PASS");
int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s\n", compare2 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
xgboost/cub/test/test_device_select_if.cu view on Meta::CPAN
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
int num_output_items = (IS_PARTITION) ? num_items : num_selected;
int num_flag_items = (IS_FLAGGED) ? num_items : 0;
size_t num_bytes = sizeof(T) * (num_items + num_output_items) + sizeof(FlagT) * num_flag_items;
float giga_bandwidth = float(num_bytes) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
xgboost/cub/test/test_device_select_unique.cu view on Meta::CPAN
// Run warmup/correctness iteration
CubDebugExit(Dispatch(Int2Type<BACKEND>(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, 0, true));
// Check for correctness (and display results, if specified)
int compare1 = CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose);
printf("\t Data %s ", compare1 ? "FAIL" : "PASS");
int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s ", compare2 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = float((num_items + num_selected) * sizeof(T)) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare1 | compare2);
xgboost/cub/test/test_util.h view on Meta::CPAN
{
cudaError_t error = cudaSuccess;
do
{
int deviceCount;
error = CubDebug(cudaGetDeviceCount(&deviceCount));
if (error) break;
if (deviceCount == 0) {
fprintf(stderr, "No devices supporting CUDA.\n");
exit(1);
}
if (dev < 0)
{
GetCmdLineArgument("device", dev);
}
if ((dev > deviceCount - 1) || (dev < 0))
{
dev = 0;
}
xgboost/cub/test/test_util.h view on Meta::CPAN
CubDebugExit(cudaMemGetInfo(&device_free_physmem, &device_total_physmem));
int ptx_version;
error = CubDebug(cub::PtxVersion(ptx_version));
if (error) break;
error = CubDebug(cudaGetDeviceProperties(&deviceProp, dev));
if (error) break;
if (deviceProp.major < 1) {
fprintf(stderr, "Device does not support CUDA.\n");
exit(1);
}
device_giga_bandwidth = float(deviceProp.memoryBusWidth) * deviceProp.memoryClockRate * 2 / 8 / 1000 / 1000;
if (!CheckCmdLineFlag("quiet"))
{
printf(
"Using device %d: %s (PTX version %d, SM%d, %d SMs, "
"%lld free / %lld total MB physmem, "
xgboost/demo/binary_classification/README.md view on Meta::CPAN
#### Monitoring Progress
When you run training, you will see messages like the following displayed on the screen
```
tree train end, 1 roots, 12 extra nodes, 0 pruned nodes ,max_depth=3
[0] test-error:0.016139
boosting round 1, 0 sec elapsed
tree train end, 1 roots, 10 extra nodes, 0 pruned nodes ,max_depth=3
[1] test-error:0.000000
```
The evaluation messages are printed to stderr, so if you want to log only the evaluation progress, simply type
```
../../xgboost mushroom.conf 2>log.txt
```
Then you can find the following content in log.txt
```
[0] test-error:0.016139
[1] test-error:0.000000
```
We can also monitor both training and test statistics by adding the following lines to the configuration file
```conf
xgboost/dmlc-core/doc/Doxyfile view on Meta::CPAN
# doxygen can produce. The string should contain the $file, $line, and $text
# tags, which will be replaced by the file and line number from which the
# warning originated and the warning text. Optionally the format may contain
# $version, which will be replaced by the version of the file (if it could
# be obtained via FILE_VERSION_FILTER)
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning
# and error messages should be written. If left blank the output is written
# to stderr.
WARN_LOGFILE =
#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag can be used to specify the files and/or directories that contain
# documented source files. You may enter file names like "myfile.cpp" or
# directories like "/usr/src/myproject". Separate the files or directories
xgboost/dmlc-core/doc/conf.py view on Meta::CPAN
# hook for doxygen
def run_doxygen(folder):
    """Run doxygen in ``folder`` and copy its HTML output into _build/html.

    Parameters
    ----------
    folder : str
        Directory containing the ``make doxygen`` target.

    Failures are reported on stderr but nothing is raised, so a broken
    doxygen install does not abort the whole documentation build.
    """
    # The original assigned every subprocess.call result to the same
    # variable and only inspected the last one, silently discarding
    # failures of the earlier commands; check each command as it runs.
    commands = [
        "cd %s; make doxygen" % folder,
        "rm -rf _build/html/doxygen",
        # -p makes the mkdir idempotent (the plain `mkdir _build` failed
        # with an error whenever the directory already existed) and also
        # creates the parent, replacing the two separate mkdir calls.
        "mkdir -p _build/html",
        "cp -rf doxygen/html _build/html/doxygen",
    ]
    try:
        for cmd in commands:
            retcode = subprocess.call(cmd, shell=True)
            if retcode < 0:
                sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
    except OSError as e:
        sys.stderr.write("doxygen execution failed: %s" % e)
def generate_doxygen_xml(app):
    """Sphinx builder-inited hook: build the doxygen docs.

    Note: unlike the xgboost/doc variant of this hook, this one runs
    unconditionally rather than only on the ReadTheDocs servers.
    """
    run_doxygen('..')
def setup(app):
# Add hook for building doxygen xml when needed
app.connect("builder-inited", generate_doxygen_xml)
app.add_config_value('recommonmark_config', {
xgboost/dmlc-core/scripts/lint.py view on Meta::CPAN
_cpplint_state.PrintErrorCounts()
errors = _cpplint_state.errors_by_category.copy()
if suffix == 'h':
self.cpp_header_map[str(path)] = errors
else:
self.cpp_src_map[str(path)] = errors
def process_python(self, path):
    """Run pylint on a python file and record error counts per category."""
    (pylint_stdout, pylint_stderr) = epylint.py_run(
        ' '.join([str(path)] + self.pylint_opts), return_std=True)
    error_counts = {}
    # Anything pylint itself wrote to stderr is surfaced verbatim.
    stderr_text = pylint_stderr.read()
    if len(stderr_text):
        print(stderr_text)
    for line in pylint_stdout:
        # Echo every diagnostic line, then tally the ones whose category
        # (the token after the last ':' and before any '(') is tracked.
        sys.stderr.write(line)
        category = line.split(':')[-1].split('(')[0].strip()
        if category not in self.pylint_cats:
            continue
        error_counts[category] = error_counts.get(category, 0) + 1
    self.python_map[str(path)] = error_counts
def print_summary(self, strm):
xgboost/dmlc-core/scripts/lint.py view on Meta::CPAN
if args.pylint_rc is not None:
_HELPER.pylint_opts = ['--rcfile='+args.pylint_rc,]
file_type = args.filetype
allow_type = []
if file_type == 'python' or file_type == 'all':
allow_type += [x for x in PYTHON_SUFFIX]
if file_type == 'cpp' or file_type == 'all':
allow_type += [x for x in CXX_SUFFIX]
allow_type = set(allow_type)
if sys.version_info.major == 2 and os.name != 'nt':
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
for path in args.path:
if os.path.isfile(path):
process(path, allow_type)
else:
for root, dirs, files in os.walk(path):
for name in files:
process(os.path.join(root, name), allow_type)
nerr = _HELPER.print_summary(sys.stderr)
sys.exit(nerr > 0)
# Script entry point: run the linter over the paths given on the command line.
if __name__ == '__main__':
    main()
xgboost/dmlc-core/tracker/README.md view on Meta::CPAN
- Memory needed for server job.
- ```--server-memory``` string, default='1g'
- Memory needed for server job.
- ```--jobname``` string, default=auto specify
- Name of the job.
- ```--queue``` string, default='default'
- The submission queue we should submit the job to.
- ```--log-level``` string, {INFO, DEBUG}
- The logging level.
- ```--log-file``` string, default='None'
- Output the log to the specified log file; the log is still printed to stderr.
xgboost/dmlc-core/tracker/dmlc_tracker/mpi.py view on Meta::CPAN
from threading import Thread
from . import tracker
def get_mpi_env(envs):
    """Build the mpirun argument string that forwards environment variables.

    Supports both Open MPI (``-x K=V``) and MPICH (``-env K V``).

    Parameters
    ----------
    envs : dict
        Mapping of environment variable names to values.

    Returns
    -------
    str
        Argument fragment to append to the mpirun command line.

    Raises
    ------
    RuntimeError
        If the installed MPI flavor cannot be identified.
    """
    # Decide MPI version by inspecting mpirun's usage message.
    (_, err) = subprocess.Popen('mpirun',
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE).communicate()
    # communicate() returns bytes on Python 3; the original code did
    # `'Open MPI' in err` directly, which raises TypeError there. Decode
    # first so the substring checks work on both Python 2 and 3.
    if isinstance(err, bytes):
        err = err.decode('utf-8', 'replace')
    cmd = ''
    if 'Open MPI' in err:
        for k, v in envs.items():
            cmd += ' -x %s=%s' % (k, str(v))
    elif 'mpich' in err:
        for k, v in envs.items():
            cmd += ' -env %s %s' % (k, str(v))
    else:
        raise RuntimeError('Unknown MPI Version')
    return cmd
xgboost/dmlc-core/tracker/dmlc_tracker/opts.py view on Meta::CPAN
help=('Memory need to be allocated for each server, ' +
'need to ends with g or m.'))
# Job-submission options shared by the dmlc tracker back-ends.
parser.add_argument('--jobname', default=None, type=str, help='Name of the job.')
parser.add_argument('--queue', default='default', type=str,
                    help='The submission queue the job should go to.')
parser.add_argument('--log-level', default='INFO', type=str,
                    choices=['INFO', 'DEBUG'],
                    help='Logging level of the logger.')
parser.add_argument('--log-file', default=None, type=str,
                    help=('Output log to the specific log file, ' +
                          'the log is still printed on stderr.'))
parser.add_argument('--host-ip', default=None, type=str,
                    help=('Host IP address, this is only needed ' +
                          'if the host IP cannot be automatically guessed.'))
parser.add_argument('--hdfs-tempdir', default='/tmp', type=str,
                    help=('Temporary directory in HDFS, ' +
                          ' only needed in YARN mode.'))
parser.add_argument('--host-file', default=None, type=str,
                    help=('The file contains the list of hostnames, needed for MPI and ssh.'))
# Fixed help text: these are SGE jobs (the option itself is --sge-log-dir),
# not "SGD" as the original message said.
parser.add_argument('--sge-log-dir', default=None, type=str,
                    help=('Log directory of SGE jobs, only needed in SGE mode.'))
xgboost/dmlc-core/tracker/yarn/src/main/java/org/apache/hadoop/yarn/dmlc/ApplicationMaster.java view on Meta::CPAN
}
/**
 * Launch a dummy task on the given container: runs ./launcher.py with
 * stdout/stderr redirected into the YARN log directory.
 *
 * @param container the allocated container to start the task in
 */
private synchronized void launchDummyTask(Container container){
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    String new_command = "./launcher.py";
    String cmd = new_command + " 1>"
        + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
        + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
        + "/stderr";
    ctx.setCommands(Collections.singletonList(cmd));
    ctx.setTokens(setupTokens());
    ctx.setLocalResources(this.workerResources);
    // The method itself is declared synchronized, so the nested
    // synchronized(this) block the original wrapped this call in was
    // redundant (intrinsic locks are reentrant) and has been removed.
    this.nmClient.startContainerAsync(container, ctx);
}
/**
* launch the task on container
*
xgboost/dmlc-core/tracker/yarn/src/main/java/org/apache/hadoop/yarn/dmlc/ApplicationMaster.java view on Meta::CPAN
private void launchTask(Container container, TaskRecord task) {
task.container = container;
task.containerRequest = null;
ContainerLaunchContext ctx = Records
.newRecord(ContainerLaunchContext.class);
String cmd =
// use this to setup CLASSPATH correctly for libhdfs
this.command + " 1>"
+ ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
+ " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
+ "/stderr";
ctx.setCommands(Collections.singletonList(cmd));
// TODO: token was not right
ctx.setTokens(setupTokens());
LOG.info(workerResources);
ctx.setLocalResources(this.workerResources);
// setup environment variables
boolean isWindows = System.getProperty("os.name").startsWith("Windows");
// setup class path, this is kind of duplicated, ignoring
String classPathStr = isWindows? "%CLASSPATH%" : "${CLASSPATH}";
xgboost/dmlc-core/tracker/yarn/src/main/java/org/apache/hadoop/yarn/dmlc/Client.java view on Meta::CPAN
// setup security token
amContainer.setTokens(this.setupTokens());
// setup cache-files and environment variables
amContainer.setLocalResources(this.setupCacheFiles(appId));
amContainer.setEnvironment(this.getEnvironment());
String cmd = Environment.JAVA_HOME.$$() + "/bin/java"
+ " -Xmx900m"
+ " org.apache.hadoop.yarn.dmlc.ApplicationMaster"
+ this.cacheFileArg + ' ' + this.appArgs + " 1>"
+ ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
+ " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
LOG.debug(cmd);
amContainer.setCommands(Collections.singletonList(cmd));
// Set up resource type requirements for ApplicationMaster
Resource capability = Records.newRecord(Resource.class);
capability.setMemory(1024);
capability.setVirtualCores(1);
LOG.info("jobname=" + this.jobName + ",username=" + this.userName);
xgboost/doc/Doxyfile view on Meta::CPAN
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
xgboost/doc/Doxyfile view on Meta::CPAN
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
xgboost/doc/conf.py view on Meta::CPAN
(master_doc, '%s.tex' % project, project,
author, 'manual'),
]
# hook for doxygen
def run_doxygen(folder):
    """Run the doxygen make command in the designated folder.

    Parameters
    ----------
    folder : str
        Directory containing the ``make doxygen`` target.

    Failures are reported on stderr but nothing is raised, so a broken
    doxygen install does not abort the whole documentation build.
    """
    try:
        retcode = subprocess.call("cd %s; make doxygen" % folder, shell=True)
        if retcode < 0:
            sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
        elif retcode != 0:
            # Previously swallowed silently: an ordinary failure of
            # `make doxygen` (e.g. make missing, target failing) returns a
            # positive exit code, which the original never reported.
            sys.stderr.write("doxygen exited with code %s" % retcode)
    except OSError as e:
        sys.stderr.write("doxygen execution failed: %s" % e)
def generate_doxygen_xml(app):
    """Sphinx builder-inited hook: build doxygen docs on ReadTheDocs only."""
    # READTHEDOCS is the env marker set by the RTD build servers.
    if os.environ.get('READTHEDOCS', None) == 'True':
        run_doxygen('..')
def setup(app):
# Add hook for building doxygen xml when needed
# no c++ API for now
xgboost/doc/sphinx_util.py view on Meta::CPAN
subprocess.call('cd ../recommonmark/; git pull', shell=True)
if not os.path.exists('web-data'):
subprocess.call('rm -rf web-data;' +
'git clone https://github.com/dmlc/web-data', shell = True)
else:
subprocess.call('cd web-data; git pull', shell=True)
sys.path.insert(0, os.path.abspath('../recommonmark/'))
sys.stderr.write('READTHEDOCS=%s\n' % (READTHEDOCS_BUILD))
from recommonmark import parser, transform
MarkdownParser = parser.CommonMarkParser
AutoStructify = transform.AutoStructify