@@ -32,7 +32,7 @@ K-means method instead of the original samples::
 [(0, 64), (1, 262), (2, 4674)]
 >>> from imblearn.under_sampling import ClusterCentroids
 >>> cc = ClusterCentroids(random_state=0)
->>> X_resampled, y_resampled = cc.fit_sample(X, y)
+>>> X_resampled, y_resampled = cc.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 64), (2, 64)]

@@ -82,7 +82,7 @@ randomly selecting a subset of data for the targeted classes::

 >>> from imblearn.under_sampling import RandomUnderSampler
 >>> rus = RandomUnderSampler(random_state=0)
->>> X_resampled, y_resampled = rus.fit_sample(X, y)
+>>> X_resampled, y_resampled = rus.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 64), (2, 64)]

@@ -99,7 +99,7 @@ by considering independently each targeted class::
 >>> print(np.vstack({tuple(row) for row in X_resampled}).shape)
 (192, 2)
 >>> rus = RandomUnderSampler(random_state=0, replacement=True)
->>> X_resampled, y_resampled = rus.fit_sample(X, y)
+>>> X_resampled, y_resampled = rus.fit_resample(X, y)
 >>> print(np.vstack({tuple(row) for row in X_resampled}).shape)
 (181, 2)

@@ -109,7 +109,7 @@ In addition, :class:`RandomUnderSampler` allows to sample heterogeneous data
 >>> X_hetero = np.array([['xxx', 1, 1.0], ['yyy', 2, 2.0], ['zzz', 3, 3.0]],
 ...                     dtype=np.object)
 >>> y_hetero = np.array([0, 0, 1])
->>> X_resampled, y_resampled = rus.fit_sample(X_hetero, y_hetero)
+>>> X_resampled, y_resampled = rus.fit_resample(X_hetero, y_hetero)
 >>> print(X_resampled)
 [['xxx' 1 1.0]
  ['zzz' 3 3.0]]
@@ -126,7 +126,7 @@ be selected with the parameter ``version``::

 >>> from imblearn.under_sampling import NearMiss
 >>> nm1 = NearMiss(version=1)
->>> X_resampled_nm1, y_resampled = nm1.fit_sample(X, y)
+>>> X_resampled_nm1, y_resampled = nm1.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 64), (2, 64)]

@@ -261,7 +261,7 @@ the sample inspected to keep it in the dataset::
 [(0, 64), (1, 262), (2, 4674)]
 >>> from imblearn.under_sampling import EditedNearestNeighbours
 >>> enn = EditedNearestNeighbours()
->>> X_resampled, y_resampled = enn.fit_sample(X, y)
+>>> X_resampled, y_resampled = enn.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 213), (2, 4568)]

@@ -275,7 +275,7 @@ Generally, repeating the algorithm will delete more data::

 >>> from imblearn.under_sampling import RepeatedEditedNearestNeighbours
 >>> renn = RepeatedEditedNearestNeighbours()
->>> X_resampled, y_resampled = renn.fit_sample(X, y)
+>>> X_resampled, y_resampled = renn.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 208), (2, 4551)]

@@ -285,7 +285,7 @@ internal nearest neighbors algorithm is increased at each iteration::

 >>> from imblearn.under_sampling import AllKNN
 >>> allknn = AllKNN()
->>> X_resampled, y_resampled = allknn.fit_sample(X, y)
+>>> X_resampled, y_resampled = allknn.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 220), (2, 4601)]

@@ -323,7 +323,7 @@ The :class:`CondensedNearestNeighbour` can be used in the following manner::

 >>> from imblearn.under_sampling import CondensedNearestNeighbour
 >>> cnn = CondensedNearestNeighbour(random_state=0)
->>> X_resampled, y_resampled = cnn.fit_sample(X, y)
+>>> X_resampled, y_resampled = cnn.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 24), (2, 115)]

@@ -338,7 +338,7 @@ used as::

 >>> from imblearn.under_sampling import OneSidedSelection
 >>> oss = OneSidedSelection(random_state=0)
->>> X_resampled, y_resampled = oss.fit_sample(X, y)
+>>> X_resampled, y_resampled = oss.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 174), (2, 4403)]

@@ -352,7 +352,7 @@ neighbors classifier. The class can be used as::

 >>> from imblearn.under_sampling import NeighbourhoodCleaningRule
 >>> ncr = NeighbourhoodCleaningRule()
->>> X_resampled, y_resampled = ncr.fit_sample(X, y)
+>>> X_resampled, y_resampled = ncr.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 234), (2, 4666)]

@@ -380,7 +380,7 @@ removed. The class can be used as::
 >>> from imblearn.under_sampling import InstanceHardnessThreshold
 >>> iht = InstanceHardnessThreshold(random_state=0,
 ...                                 estimator=LogisticRegression())
->>> X_resampled, y_resampled = iht.fit_sample(X, y)
+>>> X_resampled, y_resampled = iht.fit_resample(X, y)
 >>> print(sorted(Counter(y_resampled).items()))
 [(0, 64), (1, 64), (2, 64)]

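Taken together, the hunks above are a mechanical rename of ``fit_sample`` to ``fit_resample`` at every call site. As a minimal sketch of how the renamed call reads end to end (assuming an imbalanced-learn release that exposes ``fit_resample``; the ``make_classification`` arguments below are illustrative and not taken from this diff):

  # Sketch only: the renamed resampling call on a small imbalanced dataset.
  from collections import Counter

  from sklearn.datasets import make_classification
  from imblearn.under_sampling import RandomUnderSampler

  # Illustrative dataset: three classes with heavily skewed class weights.
  X, y = make_classification(n_samples=5000, n_classes=3, n_clusters_per_class=1,
                             weights=[0.01, 0.05, 0.94], random_state=0)
  print(sorted(Counter(y).items()))      # skewed counts before resampling

  rus = RandomUnderSampler(random_state=0)
  X_res, y_res = rus.fit_resample(X, y)  # previously rus.fit_sample(X, y)
  print(sorted(Counter(y_res).items()))  # all classes reduced to the minority size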