Alien-XGBoost

 view release on metacpan or  search on metacpan

xgboost/R-package/R/callbacks.R  view on Meta::CPAN

  
  # run some checks in the beginning
  init <- function(env) {
    nrounds <<- env$end_iteration - env$begin_iteration + 1
    
    if (is.null(env$bst) && is.null(env$bst_folds))
      stop("Parent frame has neither 'bst' nor 'bst_folds'")
    
    # Some parameters are not allowed to be changed,
    # since changing them would simply wreak havoc
    not_allowed <- pnames %in% 
      c('num_class', 'num_output_group', 'size_leaf_vector', 'updater_seq')
    if (any(not_allowed))
      stop('Parameters ', paste(pnames[not_allowed]), " cannot be changed during boosting.")
    
    for (n in pnames) {
      p <- new_params[[n]]
      if (is.function(p)) {
        if (length(formals(p)) != 2)
          stop("Parameter '", n, "' is a function but not of two arguments")
      } else if (is.numeric(p) || is.character(p)) {
        if (length(p) != nrounds)
          stop("Length of '", n, "' has to be equal to 'nrounds'")
      } else {

xgboost/R-package/tests/testthat/test_lint.R  view on Meta::CPAN

context("Code is of high quality and lint free")
test_that("Code Lint", {
  skip_on_cran()
  skip_on_travis()
  skip_if_not_installed("lintr")
  my_linters <- list(
    absolute_paths_linter=lintr::absolute_paths_linter,
    assignment_linter=lintr::assignment_linter,
    closed_curly_linter=lintr::closed_curly_linter,
    commas_linter=lintr::commas_linter,
    # commented_code_linter=lintr::commented_code_linter,
    infix_spaces_linter=lintr::infix_spaces_linter,
    line_length_linter=lintr::line_length_linter,
    no_tab_linter=lintr::no_tab_linter,
    object_usage_linter=lintr::object_usage_linter,

xgboost/src/tree/param.h  view on Meta::CPAN

  }
  /*! \brief Whether a backward (big-to-small) search is needed.
   *         True unless the default direction is forced right (value 2).
   *         NOTE(review): col_density and indicator are currently unused here. */
  inline bool need_backward_search(float col_density, bool indicator) const {
    const bool forced_right = (this->default_direction == 2);
    return !forced_right;
  }
  /*! \brief Given the loss change of a candidate split, decide whether
   *         pruning should be invoked (gain falls below min_split_loss).
   *         depth is accepted for interface compatibility but unused. */
  inline bool need_prune(double loss_chg, int depth) const {
    const bool gain_too_small = loss_chg < this->min_split_loss;
    return gain_too_small;
  }
  /*! \brief Whether the current hessian sum is too small to allow a split:
   *         both children must each reach min_child_weight, hence the factor 2.
   *         depth is accepted for interface compatibility but unused. */
  inline bool cannot_split(double sum_hess, int depth) const {
    const double min_required = this->min_child_weight * 2.0;
    return sum_hess < min_required;
  }
  /*! \brief Maximum sketch size, derived as sketch_ratio / sketch_eps.
   *         Aborts (via CHECK_GT) if the computed limit truncates to zero. */
  inline unsigned max_sketch_size() const {
    const unsigned limit = static_cast<unsigned>(sketch_ratio / sketch_eps);
    CHECK_GT(limit, 0U);
    return limit;
  }
};

xgboost/tests/python/test_with_sklearn.py  view on Meta::CPAN


    digits_2class = load_digits(2)

    X = digits_2class['data']
    y = digits_2class['target']

    dm = xgb.DMatrix(X, label=y)
    params = {'max_depth': 6, 'eta': 0.01, 'silent': 1, 'objective': 'binary:logistic'}

    gbdt = xgb.train(params, dm, num_boost_round=10)
    assert gbdt.get_split_value_histogram("not_there", as_pandas=True).shape[0] == 0
    assert gbdt.get_split_value_histogram("not_there", as_pandas=False).shape[0] == 0
    assert gbdt.get_split_value_histogram("f28", bins=0).shape[0] == 1
    assert gbdt.get_split_value_histogram("f28", bins=1).shape[0] == 1
    assert gbdt.get_split_value_histogram("f28", bins=2).shape[0] == 2
    assert gbdt.get_split_value_histogram("f28", bins=5).shape[0] == 2
    assert gbdt.get_split_value_histogram("f28", bins=None).shape[0] == 2


def test_sklearn_random_state():
    tm._skip_if_no_sklearn()



( run in 0.488 second using v1.01-cache-2.11-cpan-cc502c75498 )