Skip to content

Commit

Permalink
Merge branch 'master' of github.com:0xdata/h2o
Browse files Browse the repository at this point in the history
Conflicts:
	src/main/java/water/util/ChunkSummary.java
  • Loading branch information
cliffclick committed Aug 4, 2014
2 parents 917d6f9 + 4ef7e7e commit 16f16cc
Show file tree
Hide file tree
Showing 30 changed files with 108 additions and 317 deletions.
2 changes: 1 addition & 1 deletion R/h2o-DESCRIPTION.template
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ Title: H2O R Interface
Version: SUBST_PROJECT_VERSION
Date: 2014-05-15
Author: Anqi Fu, Tom Kraljevic and Petr Maj, with contributions from the 0xdata team
Maintainer: Anqi Fu <anqi@0xdata.com>
Maintainer: Ariel Rao <ariel@0xdata.com>
Description: This is a package for running H2O via its REST API from within R.
License: Apache License (== 2.0)
Depends: R (>= 2.13.0), RCurl, rjson, statmod, tools, methods, utils
Expand Down
2 changes: 1 addition & 1 deletion R/h2o-package.template
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ Note that no actual data is stored in the R workspace; and no actual work is car
\author{
Anqi Fu, Tom Kraljevic and Petr Maj, with contributions from the 0xdata team

Maintainer: Anqi Fu <anqi@0xdata.com>
Maintainer: Ariel Rao <ariel@0xdata.com>
}
\references{
\itemize{
Expand Down
12 changes: 12 additions & 0 deletions R/h2o-package/man/AAA_DownloadAndStartBeforeExamples.Rd
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
\name{AAA_DownloadAndStartBeforeExamples}
\title{
Download H2O jar file and Start H2O cloud before examples run (for H2O developers only)
}
\description{
\code{AAA_DownloadAndStartBeforeExamples} downloads the H2O jar file and starts the H2O cloud before the examples run. This is only relevant for H2O developers during the building of the CRAN package.
}

\examples{
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
}
3 changes: 2 additions & 1 deletion R/h2o-package/man/H2OGLMModelList-class.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,9 @@ Objects can be created by calls of the form \code{new("H2OGLMModelList", ...)}.
}
\section{Slots}{
\describe{
\item{\code{models}:}{Object of class \code{"list"} containing \code{"H2OGLMModel"} objects representing the models returned from the lambda search }
\item{\code{models}:}{Object of class \code{"list"} containing \code{"H2OGLMModel"} objects representing the models returned from the lambda search. }
\item{\code{best_model}:}{Object of class \code{"numeric"} indicating the index of the model with the optimal lambda value in the above list. }
\item{\code{lambdas}:}{Object of class \code{"numeric"} indicating the optimal lambda value from the lambda search. }
}
}
\section{Methods}{
Expand Down
4 changes: 3 additions & 1 deletion R/h2o-package/man/H2OParsedData-class.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,7 @@
\alias{summary,H2OParsedData-method}
\alias{t,H2OParsedData-method}
\alias{tail,H2OParsedData-method}
\alias{trunc,H2OParsedData-method}
\alias{var,H2OParsedData-method}

\title{Class \code{"H2OParsedData"}}
Expand Down Expand Up @@ -212,8 +213,9 @@ Objects can be created by calls of the form \code{new("H2OParsedData", ...)}.
\item{summary}{\code{signature(object = "H2OParsedData")}: ... }
\item{t}{\code{signature(object = "H2OParsedData")}: ... }
\item{tail}{\code{signature(x = "H2OParsedData")}: ... }
\item{trunc}{\code{signature(x = "H2OParsedData")}: ... }
\item{var}{\code{signature(x = "H2OParsedData")}: ... }
}
}
}
\seealso{
%% ~~objects to See Also as \code{\link{~~fun~~}}, ~~~
Expand Down
2 changes: 1 addition & 1 deletion R/h2o-package/man/h2o.SpeeDRF.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ Performs single-node random forest classification on a data set.
}
\usage{
h2o.SpeeDRF(x, y, data, key = "", classification = TRUE, nfolds = 0, validation,
mtry = -1, ntree = 50, depth = 50, sample.rate = 2/3, oobee = TRUE,
mtry = -1, ntree = 50, depth = 20, sample.rate = 2/3, oobee = TRUE,
importance = FALSE, nbins = 1024, seed = -1, stat.type = "ENTROPY",
balance.classes = FALSE, verbose = FALSE)
}
Expand Down
3 changes: 2 additions & 1 deletion R/h2o-package/man/h2o.anomaly.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,8 @@ library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
prosPath = system.file("extdata", "prostate.csv", package = "h2o")
prostate.hex = h2o.importFile(localH2O, path = prosPath)
prostate.dl = h2o.deeplearning(x = 3:9, y = 2, data = prostate.hex, autoencoder = TRUE)
prostate.dl = h2o.deeplearning(x = 3:9, y = 2, data = prostate.hex, autoencoder = TRUE,
hidden = c(10, 10), epochs = 5)
prostate.anon = h2o.anomaly(prostate.hex, prostate.dl)
head(prostate.anon)
}
Expand Down
8 changes: 4 additions & 4 deletions R/h2o-package/man/h2o.deeplearning.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@ H2O: Deep Learning Neural Networks
Performs Deep Learning neural networks on an \code{\linkS4class{H2OParsedData}} object.
}
\usage{
h2o.deeplearning(x, y, data, key = "", checkpoint = "", classification = TRUE, nfolds = 0,
validation, autoencoder, use_all_factor_levels,
h2o.deeplearning(x, y, data, key = "",override_with_best_model, classification = TRUE,
nfolds = 0, validation, checkpoint = "", autoencoder, use_all_factor_levels,
activation, hidden, epochs, train_samples_per_iteration, seed, adaptive_rate,
rho, epsilon, rate, rate_annealing, rate_decay, momentum_start,
momentum_ramp, momentum_stable, nesterov_accelerated_gradient,
Expand All @@ -30,10 +30,10 @@ h2o.deeplearning(x, y, data, key = "", checkpoint = "", classification = TRUE, n
\item{data}{ An \code{\linkS4class{H2OParsedData}} object containing the variables in the model. }
\item{key}{ (Optional) The unique hex key assigned to the resulting model. If none is given, a key will automatically be generated.}
\item{override_with_best_model}{ If enabled, override the final model with the best model found during training. Defaults to true.}
\item{checkpoint}{"Model checkpoint (either key or H2ODeepLearningModel) to resume training with."}
\item{classification}{ (Optional) A logical value indicating whether the algorithm should conduct classification. }
\item{nfolds}{(Optional) Number of folds for cross-validation. If \code{nfolds >= 2}, then \code{validation} must remain empty.}
\item{validation}{(Optional) An \code{\linkS4class{H2OParsedData}} object indicating the validation dataset used to construct confusion matrix. If left blank, this defaults to the training data when \code{nfolds = 0}.}
\item{checkpoint}{"Model checkpoint (either key or H2ODeepLearningModel) to resume training with."}
\item{activation}{A string indicating the activation function to use. Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout" or "MaxoutWithDropout".}
\item{hidden}{ Hidden layer sizes (e.g. c(100,100)}

Expand Down Expand Up @@ -101,5 +101,5 @@ localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
irisPath = system.file("extdata", "iris.csv", package = "h2o")
iris.hex = h2o.importFile(localH2O, path = irisPath)
h2o.deeplearning(x = 1:4, y = 5, data = iris.hex, activation = "Tanh",
hidden = c(50, 50, 50), epochs = 500)
hidden = c(10, 10), epochs = 5)
}
2 changes: 1 addition & 1 deletion R/h2o-package/man/h2o.getGLMLambdaModel.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
prosPath = system.file("extdata", "prostate.csv", package = "h2o")
prostate.hex = h2o.importFile(localH2O, path = prosPath)
prostate.srch = h2o.glm(x = 3:9, y = 2, data = prostate.hex, family = "binomial",
nlambda = 50, lambda_search = TRUE, nfolds = 2)
nlambda = 3, lambda_search = TRUE, nfolds = 2)
random_lambda = sample(prostate.srch@model$params$lambda_all, 1)
random_model = h2o.getGLMLambdaModel(prostate.srch, random_lambda)
}
Expand Down
4 changes: 2 additions & 2 deletions R/h2o-package/man/h2o.glm.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -109,10 +109,10 @@ localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
prostate.hex = h2o.importURL(localH2O, path = paste("https://raw.github.com",
"0xdata/h2o/master/smalldata/logreg/prostate.csv", sep = "/"), key = "prostate.hex")
h2o.glm(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), data = prostate.hex, family = "binomial",
nfolds = 10, alpha = 0.5)
nfolds = 2, alpha = 0.5)
# Run GLM of VOL ~ CAPSULE + AGE + RACE + PSA + GLEASON
myX = setdiff(colnames(prostate.hex), c("ID", "DPROS", "DCAPS", "VOL"))
h2o.glm(y = "VOL", x = myX, data = prostate.hex, family = "gaussian", nfolds = 5, alpha = 0.1)
h2o.glm(y = "VOL", x = myX, data = prostate.hex, family = "gaussian", nfolds = 2, alpha = 0.1)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
Expand Down
2 changes: 2 additions & 0 deletions R/h2o-package/man/h2o.ignoreColumns.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,12 @@ h2o.ignoreColumns(data, max_na = 0.2)
\value{Returns a vector of column names.
}
\examples{
\dontrun{
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
airlinesURL = "https://s3.amazonaws.com/h2o-airlines-unpacked/allyears2k.csv"
airlines.hex = h2o.importFile(localH2O, path = airlinesURL, key = "airlines.hex")
h2o.ignoreColumns(airlines.hex)
}
}

2 changes: 2 additions & 0 deletions R/h2o-package/man/h2o.importFolder.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -52,13 +52,15 @@ If \code{parse = TRUE}, the function returns an object of class \code{\linkS4cla
\code{\link{h2o.importFile}, \link{h2o.importHDFS}, \link{h2o.importURL}, \link{h2o.uploadFile}}
}
\examples{
\dontrun{
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
myPath = system.file("extdata", "prostate_folder", package = "h2o")
prostate_all.hex = h2o.importFolder(localH2O, path = myPath)
class(prostate_all.hex)
summary(prostate_all.hex)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
Expand Down
3 changes: 2 additions & 1 deletion R/h2o-package/man/h2o.saveModel.Rd
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,13 @@
\alias{h2o.saveModel}
\title{Save a H2OModel object to disk.}
\description{Save a H2OModel object to a disk and can be loaded back into H2O using h2o.loadModel.}
\usage{h2o.saveModel(object, dir, name, force=FALSE)}
\usage{h2o.saveModel(object, dir="", name="", filename = "", force=FALSE)}

\arguments{
\item{object}{ An \code{\linkS4class{H2OModel}} object.}
\item{dir}{ Directory the model file will be written to.}
\item{name}{ Name of the file being saved.}
\item{filename}{ Full path of directory and name of file being saved. Will override \code{dir} and \code{name} parameters if also given. }
\item{force}{ (Optional) If \code{force = TRUE} any existing file will be overwritten. Otherwise if the file already exists the operation will fail.}
}

Expand Down
23 changes: 23 additions & 0 deletions R/h2o-package/man/round.Rd
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
\name{round.H2OParsedData}
\alias{round.H2OParsedData}
\title{Rounds argument.}
\description{Rounds the values in the first argument of a \code{\linkS4class{H2OParsedData}} object to the specified number of decimal places. Default is 0.}
\usage{\method{round}{H2OParsedData}(x, digits=0)}

\arguments{
\item{x}{ An \code{\linkS4class{H2OParsedData}} object.}
\item{digits}{ Single number specifying the number of decimal places to round to.}
}
\value{
Returns a \code{\linkS4class{H2OParsedData}} object with argument rounded as specified.
}

\examples{
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
irisPath = system.file("extdata", "iris.csv", package="h2o")
iris.hex = h2o.importFile(localH2O, path = irisPath)
iris.data <- iris.hex[,1:3]
iris.rounded <- round.H2OParsedData(iris.data, digits=0)
head(iris.rounded)
}
14 changes: 14 additions & 0 deletions R/h2o-package/man/zzz_ShutdownAfterExamples.Rd
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
\name{zzz_ShutdownAfterExamples}
\title{
Shutdown H2O cloud after examples run (for H2O developers only)
}
\description{
\code{zzz_ShutdownAfterExamples} shuts down the H2O cloud after the examples run. This is only relevant for H2O developers during the building of the CRAN package.
}

\examples{
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
h2o.shutdown(localH2O, prompt = FALSE)
Sys.sleep(2)
}
1 change: 1 addition & 0 deletions R/tests/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -1061,6 +1061,7 @@ def _report_test_result(self, test):
def _log(self, s):
f = self._get_summary_filehandle_for_appending()
print(s)
sys.stdout.flush()
f.write(s + "\n")
f.close()

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,22 +62,22 @@ test.GBM.bernoulli.SyntheticData <- function(conn) {
num_models = length(tru.gbm@sumtable)
print(paste("Number of gbm models created:", num_models,sep ='') )
expect_equal(num_models,36)

for(i in 1:num_models){
model = tru.gbm@model[[i]]
gg<-gbm(y~., data=all.data2, distribution="bernoulli", n.trees=tru.gbm@sumtable[[i]]$ntrees,
interaction.depth=tru.gbm@sumtable[[i]]$max_depth,n.minobsinnode=tru.gbm@sumtable[[i]]$min_rows,
shrinkage=tru.gbm@sumtable[[i]]$learn_rate,bag.fraction=1) # R gbm model
mm_y=predict.gbm(gg,newdata=test.data2,n.trees=tru.gbm@sumtable[[i]]$ntrees,type='response') # R Predict
gg<-gbm(y~., data=all.data2, distribution="bernoulli", n.trees=tru.gbm@sumtable[[i]]$n.trees,
interaction.depth=tru.gbm@sumtable[[i]]$interaction.depth,n.minobsinnode=tru.gbm@sumtable[[i]]$n.minobsinnode,
shrinkage=tru.gbm@sumtable[[i]]$shrinkage,bag.fraction=1) # R gbm model
mm_y=predict.gbm(gg,newdata=test.data2,n.trees=tru.gbm@sumtable[[i]]$n.trees,type='response') # R Predict
R_auc = round(gbm.roc.area(test.data2$y,mm_y), digits=2)
pred = h2o.predict(model,test) #H2O Predict
H2O_perf = h2o.performance(pred$'1',test$y,measure="F1")
H2O_auc = round(H2O_perf@model$auc, digits=2)
print(paste ( tru.gbm@sumtable[[i]]$model_key,
" trees:", tru.gbm@sumtable[[i]]$ntrees,
" depth:",tru.gbm@sumtable[[i]]$max_depth,
" shrinkage:",tru.gbm@sumtable[[i]]$learn_rate,
" min row: ",tru.gbm@sumtable[[i]]$min_rows,
" trees:", tru.gbm@sumtable[[i]]$n.trees,
" depth:",tru.gbm@sumtable[[i]]$interaction.depth,
" shrinkage:",tru.gbm@sumtable[[i]]$shrinkage,
" min row: ",tru.gbm@sumtable[[i]]$n.minobsinnode,
" bins:",tru.gbm@sumtable[[i]]$nbins,
" H2O_auc:", H2O_auc,
" R_auc:", R_auc, sep=''),quote=F)
Expand Down
8 changes: 4 additions & 4 deletions R/tests/testdir_demos/runit_demo_SciworkFlow_medium.R
Original file line number Diff line number Diff line change
Expand Up @@ -87,10 +87,10 @@ for ( i in 1:num_models ) {
pred = h2o.predict ( model, pros.test )
perf = h2o.performance ( pred$'1', pros.test$CAPSULE, measure="F1" )

print ( paste ( pros.gbm@sumtable[[i]]$model_key, " trees:", pros.gbm@sumtable[[i]]$ntrees,
" depth:", pros.gbm@sumtable[[i]]$max_depth,
" shrinkage:", pros.gbm@sumtable[[i]]$learn_rate,
" min row: ", pros.gbm@sumtable[[i]]$min_rows,
print ( paste ( pros.gbm@sumtable[[i]]$model_key, " trees:", pros.gbm@sumtable[[i]]$n.trees,
" depth:", pros.gbm@sumtable[[i]]$interaction.depth,
" shrinkage:", pros.gbm@sumtable[[i]]$shrinkage,
" min row: ", pros.gbm@sumtable[[i]]$n.minobsinnode,
" bins:", pros.gbm@sumtable[[i]]$nbins,
" auc:", round(perf@model$auc, digits=4), sep=''), quote=F)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
# Testing glm cross validation performance with adult dataset
##

stop("Skip actual run for failing NOPASS test")

setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../findNSourceUtils.R')
Expand Down Expand Up @@ -36,4 +37,4 @@ test <- function(conn) {
testEnd()
}

doTest("Testing glm cross validation performance with adult dataset", test)
doTest("Testing glm cross validation performance with adult dataset", test)
Binary file modified docs/deeplearning/DeepLearningRVignette.pdf
Binary file not shown.
6 changes: 3 additions & 3 deletions docs/deeplearning/DeepLearningRVignette.tex
Original file line number Diff line number Diff line change
Expand Up @@ -377,7 +377,7 @@ \subsubsection{Extracting and handling the results} \label{3.2.1}
\end{spverbatim}
\noindent
The latter command returns the trained model's training and validation error. The training error value is based on the parameter \texttt{score\_training\_samples}, which specifies the number of randomly sampled training points to be used for scoring; the default uses 10,000 points. The validation error is based on the parameter \texttt{score\_validation\_samples}, which controls the same value on the validation set and is set by default to be the entire validation set.
In general choosing more sampled points leads to a better idea of the model's performance on your dataset; setting either of these parameters to 0 automatically uses the entire corresponding dataset for scoring. Either way, however, you can control the mininimum
In general choosing more sampled points leads to a better idea of the model's performance on your dataset; setting either of these parameters to 0 automatically uses the entire corresponding dataset for scoring. Either way, however, you can control the minimum
and maximum time spent on scoring with the \texttt{score\_interval} and \texttt{score\_duty\_cycle} parameters.
\\
\\
Expand All @@ -403,7 +403,7 @@ \subsection{Web interface} \label{3.3}
\subsubsection{Variable importances} \label{3.3.1}
One particularly useful visualization in the web interface is the variable importances, which can be enabled with the additional argument \texttt{variable\_importanes=TRUE}. This features allows us to witness the absolute and relative predictive strength of each feature in the classification task. For the purposes of digit classification, this aspect is less useful since each feature merely correponds to a pixel number that is not relevant to the model's more abstract feature representation; but it is useful in general for less uniform data.
One particularly useful visualization in the web interface is the variable importances, which can be enabled with the additional argument \texttt{variable\_importances=TRUE}. This feature allows us to witness the absolute and relative predictive strength of each feature in the classification task. For the purposes of digit classification, this aspect is less useful since each feature merely corresponds to a pixel number that is not relevant to the model's more abstract feature representation; but it is useful in general for less uniform data.
\subsubsection{Java model} \label{3.3.2}
Expand All @@ -417,7 +417,7 @@ \subsection{Grid search for model comparison} \label{3.4}
#Create a set of network topologies
hidden_layers = list(c(200,200), c(100,300,100),c(500,500,500))
mnist_model_grid = h2o.deeplearning(x=1:784, y=785, data=train_images.hex, activation="RectifierWithDropout", hidden=hidden_layers, validation = test_images.hex, epochs=1, l1=c(1e-5,1e-7), input_dropout_ratio=0.2)
mnist_model_grid = h2o.deeplearning(x = 1:784, y = 785, data = train_images.hex, activation = "RectifierWithDropout", hidden = hidden_layers, validation = test_images.hex, epochs = 1, l1 = c(1e-5,1e-7), input_dropout_ratio = 0.2)
\end{spverbatim}
\noindent
Expand Down
9 changes: 6 additions & 3 deletions py/h2o_import.py
Original file line number Diff line number Diff line change
Expand Up @@ -447,9 +447,12 @@ def delete_keys_at_all_nodes(node=None, pattern=None, timeoutSecs=120):
# this will be interesting if the others don't have a complete set
# theoretically, the deletes should be 0 after the first node
# since the deletes should be global
for node in reversed(h2o.nodes):
deletedCnt = delete_keys(node, pattern=pattern, timeoutSecs=timeoutSecs)
totalDeletedCnt += deletedCnt
# for node in reversed(h2o.nodes):

# new: only use the directed node (node[0] typically)
# h2o storeview should have a global view now.
deletedCnt = delete_keys(node, pattern=pattern, timeoutSecs=timeoutSecs)
totalDeletedCnt += deletedCnt

if pattern:
print "Total: Deleted", totalDeletedCnt, "keys with filter=", pattern, "at", len(h2o.nodes), "nodes"
Expand Down
8 changes: 4 additions & 4 deletions py/testdir_multi_jvm/test_parse_time_fvec.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,8 +101,8 @@ def getRandomTimeStamp():
# assume leading zero is option
day = days[h2o_util.weighted_choice(dayWeights)]
# may or may not leading zero fill the day
if random.randint(0,1) == 1:
day = day.zfill(2)
# if random.randint(0,1) == 1:
# day = day.zfill(2)

# yy year
timestampFormat = random.randint(0,3)
Expand All @@ -129,8 +129,8 @@ def getRandomTimeStamp():
else:
month = str(random.randint(1,12))
# may or may not leading zero fill the month
if random.randint(0,1) == 1:
month = month.zfill(2)
# if random.randint(0,1) == 1:
# month = month.zfill(2)

# may or may not leading zero fill the hour
hour = str(random.randint(0,23))
Expand Down
10 changes: 1 addition & 9 deletions py/testdir_release/c2/test_c2_fvec.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,16 +68,8 @@ def sub_c2_fvec_long(self):
h2o.cloudPerfH2O.message(msg)

if DO_GLM:
# these are all the columns that are enums in the dataset...too many for GLM!
x = range(542) # don't include the output column
# remove the output too! (378)
ignore_x = []
for i in [3,4,5,6,7,8,9,10,11,14,16,17,18,19,20,424,425,426,540,541]:
x.remove(i)
ignore_x.append(i)

# plus 1 because we are no longer 0 offset
x = ",".join(map(lambda x: "C" + str(x+1), x))
ignore_x = [3,4,5,6,7,8,9,10,11,14,16,17,18,19,20,424,425,426,540,541]
ignore_x = ",".join(map(lambda x: "C" + str(x+1), ignore_x))

GLMkwargs = {
Expand Down
Loading

0 comments on commit 16f16cc

Please sign in to comment.