"""
Sequential feature selection
"""
import numbers
import warnings

import numpy as np

from ._base import SelectorMixin
from ..base import BaseEstimator, MetaEstimatorMixin, clone
from ..utils._tags import _safe_tags
from ..utils.validation import check_is_fitted
from ..model_selection import cross_val_score


class SequentialFeatureSelector(SelectorMixin, MetaEstimatorMixin, BaseEstimator):
"""Transformer that performs Sequential Feature Selection.

    This Sequential Feature Selector adds (forward selection) or
    removes (backward selection) features to form a feature subset in a
    greedy fashion. At each stage, this estimator chooses the best feature to
    add or remove based on the cross-validation score of an estimator. In
    the case of unsupervised learning, this Sequential Feature Selector
    looks only at the features (X), not the desired outputs (y).

    Read more in the :ref:`User Guide <sequential_feature_selection>`.

    .. versionadded:: 0.24

    Parameters
    ----------
estimator : estimator instance
An unfitted estimator.
    n_features_to_select : "auto", int or float, default='warn'
        If `"auto"`, the behaviour depends on the `tol` parameter:

        - if `tol` is not `None`, then features are selected until the score
          improvement does not exceed `tol`.
        - otherwise, half of the features are selected.

        If integer, the parameter is the absolute number of features to select.
        If float between 0 and 1, it is the fraction of features to select.

        .. versionadded:: 1.1
           The option `"auto"` was added in version 1.1.

        .. deprecated:: 1.1
           The default changed from `None` to `"warn"` in 1.1 and will become
           `"auto"` in 1.3. `None` and `'warn'` will be removed in 1.3.
           To keep the same behaviour as `None`, set
           `n_features_to_select="auto"` and `tol=None`.
    tol : float, default=None
        If the score is not improved by at least `tol` between two
        consecutive feature additions or removals, the selection stops.
        `tol` is only enabled when `n_features_to_select` is `"auto"`.

        .. versionadded:: 1.1
direction : {'forward', 'backward'}, default='forward'
Whether to perform forward selection or backward selection.
scoring : str, callable, list/tuple or dict, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
If None, the estimator's score method is used.
    cv : int, cross-validation generator or an iterable, default=5
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
        with `shuffle=False` so the splits will be the same across calls.

        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
n_jobs : int, default=None
Number of jobs to run in parallel. When evaluating a new feature to
add or remove, the cross-validation procedure is parallel over the
folds.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.

    Attributes
    ----------
    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_features_to_select_ : int
        The number of features that were selected.

    support_ : ndarray of shape (n_features,), dtype=bool
        The mask of selected features.

    See Also
    --------
GenericUnivariateSelect : Univariate feature selector with configurable
strategy.
RFE : Recursive feature elimination based on importance weights.
RFECV : Recursive feature elimination based on importance weights, with
automatic selection of the number of features.
SelectFromModel : Feature selection based on thresholds of importance
weights.

    Examples
    --------
>>> from sklearn.feature_selection import SequentialFeatureSelector
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from sklearn.datasets import load_iris
>>> X, y = load_iris(return_X_y=True)
>>> knn = KNeighborsClassifier(n_neighbors=3)
>>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3)
>>> sfs.fit(X, y)
SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3),
n_features_to_select=3)
>>> sfs.get_support()
array([ True, False, True, True])
>>> sfs.transform(X).shape
(150, 3)
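
    With `n_features_to_select="auto"` and a `tol` threshold, selection
    stops once the cross-validation score improvement between consecutive
    iterations falls below `tol` (the threshold value here is purely
    illustrative):

    >>> sfs_auto = SequentialFeatureSelector(
    ...     knn, n_features_to_select="auto", tol=0.01)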
"""

    def __init__(
self,
estimator,
*,
n_features_to_select="warn",
tol=None,
direction="forward",
scoring=None,
cv=5,
n_jobs=None,
):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.tol = tol
self.direction = direction
self.scoring = scoring
self.cv = cv
self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Learn the features to select from X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.
        y : array-like of shape (n_samples,), default=None
            Target values. This parameter may be ignored for
            unsupervised learning.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
# FIXME: to be removed in 1.3
if self.n_features_to_select in ("warn", None):
# for backwards compatibility
            warnings.warn(
                "Leaving `n_features_to_select` to "
                "None is deprecated in 1.1 and will become 'auto' "
                "in 1.3. To keep the same behaviour as with None "
                "(i.e. select half of the features) and avoid "
                "this warning, you should manually set "
                "`n_features_to_select='auto'` and set `tol=None` "
                "when creating an instance.",
                FutureWarning,
            )
tags = self._get_tags()
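        # Delegate NaN handling to the wrapped estimator: only require finite
        # values when the estimator's tags say it cannot accept NaN.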
X = self._validate_data(
X,
accept_sparse="csc",
ensure_min_features=2,
force_all_finite=not tags.get("allow_nan", True),
)
n_features = X.shape[1]
# FIXME: to be fixed in 1.3
        error_msg = (
            "n_features_to_select must be either 'auto', 'warn', None, "
            "an integer in [1, n_features - 1] representing the absolute "
            "number of features, or a float in (0, 1] representing a "
            f"fraction of the features to select. Got {self.n_features_to_select}"
        )
if self.n_features_to_select in ("warn", None):
if self.tol is not None:
raise ValueError("tol is only enabled if `n_features_to_select='auto'`")
self.n_features_to_select_ = n_features // 2
elif self.n_features_to_select == "auto":
if self.tol is not None:
# With auto feature selection, `n_features_to_select_` will be updated
# to `support_.sum()` after features are selected.
self.n_features_to_select_ = n_features - 1
else:
self.n_features_to_select_ = n_features // 2
elif isinstance(self.n_features_to_select, numbers.Integral):
if not 0 < self.n_features_to_select < n_features:
raise ValueError(error_msg)
self.n_features_to_select_ = self.n_features_to_select
elif isinstance(self.n_features_to_select, numbers.Real):
if not 0 < self.n_features_to_select <= 1:
raise ValueError(error_msg)
self.n_features_to_select_ = int(n_features * self.n_features_to_select)
else:
raise ValueError(error_msg)
if self.direction not in ("forward", "backward"):
raise ValueError(
"direction must be either 'forward' or 'backward'. "
f"Got {self.direction}."
)
cloned_estimator = clone(self.estimator)
# the current mask corresponds to the set of features:
# - that we have already *selected* if we do forward selection
# - that we have already *excluded* if we do backward selection
current_mask = np.zeros(shape=n_features, dtype=bool)
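        # Number of greedy iterations: forward selection adds one feature per
        # iteration until `n_features_to_select_` features are chosen, while
        # backward selection removes one per iteration until only
        # `n_features_to_select_` remain. With "auto" and `tol`, the cap is
        # `n_features - 1` and the loop typically exits early via `tol`.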
n_iterations = (
self.n_features_to_select_
if self.n_features_to_select == "auto" or self.direction == "forward"
else n_features - self.n_features_to_select_
)
old_score = -np.inf
is_auto_select = self.tol is not None and self.n_features_to_select == "auto"
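        # With "auto" and `tol`, stop as soon as the best remaining candidate
        # no longer improves the cross-validation score by at least `tol`.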
for _ in range(n_iterations):
new_feature_idx, new_score = self._get_best_new_feature_score(
cloned_estimator, X, y, current_mask
)
if is_auto_select and ((new_score - old_score) < self.tol):
break
old_score = new_score
current_mask[new_feature_idx] = True
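        # During backward selection `current_mask` marked the *removed*
        # features, so invert it to obtain the mask of kept features.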
if self.direction == "backward":
current_mask = ~current_mask
self.support_ = current_mask
self.n_features_to_select_ = self.support_.sum()
return self

    def _get_best_new_feature_score(self, estimator, X, y, current_mask):
        # Return the best new feature to add to `current_mask`, i.e. the
        # feature to add (resp. remove) when doing forward (resp. backward)
        # selection, together with its cross-validation score. The caller
        # only keeps it if the score improvement exceeds `tol` when
        # `n_features_to_select` is "auto".
candidate_feature_indices = np.flatnonzero(~current_mask)
scores = {}
for feature_idx in candidate_feature_indices:
candidate_mask = current_mask.copy()
candidate_mask[feature_idx] = True
if self.direction == "backward":
candidate_mask = ~candidate_mask
X_new = X[:, candidate_mask]
scores[feature_idx] = cross_val_score(
estimator,
X_new,
y,
cv=self.cv,
scoring=self.scoring,
n_jobs=self.n_jobs,
).mean()
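        # Greedy step: pick the candidate whose inclusion (forward) or
        # removal (backward) yields the highest mean cross-validation score.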
new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx])
return new_feature_idx, scores[new_feature_idx]

    def _get_support_mask(self):
check_is_fitted(self)
return self.support_

    def _more_tags(self):
return {
"allow_nan": _safe_tags(self.estimator, key="allow_nan"),
}