If an input sample has the form [a, b], generating degree-2 polynomial features yields [1, a, b, a^2, ab, b^2]; with interaction_only=True the pure powers are dropped, leaving [1, a, b, ab].
import numpy as np
from sklearn import preprocessing
x = np.arange(6).reshape(3,2)
print(x)
# default: full degree-2 expansion -> [1, a, b, a^2, ab, b^2]
poly = preprocessing.PolynomialFeatures( 2 )
y0 = poly.fit_transform( x )
# interaction_only=True keeps only interaction terms -> [1, a, b, ab]
poly = preprocessing.PolynomialFeatures( 2, interaction_only=True )
y1 = poly.fit_transform(x)
# include_bias=False drops the leading column of ones
poly = preprocessing.PolynomialFeatures( 2, include_bias=False )
y2 = poly.fit_transform(x)
print(y0)
print(y1)
print(y2)
[[0 1]
[2 3]
[4 5]]
[[ 1. 0. 1. 0. 0. 1.]
[ 1. 2. 3. 4. 6. 9.]
[ 1. 4. 5. 16. 20. 25.]]
[[ 1. 0. 1. 0.]
[ 1. 2. 3. 6.]
[ 1. 4. 5. 20.]]
[[ 0. 1. 0. 0. 1.]
[ 2. 3. 4. 6. 9.]
[ 4. 5. 16. 20. 25.]]
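The column order can be confirmed from the transformer itself via its get_feature_names method; a quick check:
poly = preprocessing.PolynomialFeatures( 2 ).fit(x)
print( poly.get_feature_names(["a", "b"]) )  # ['1', 'a', 'b', 'a^2', 'a b', 'b^2']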
from sklearn.model_selection import train_test_split
data = np.arange(50).reshape(10,5)
train_set, test_set = train_test_split(data, test_size=0.2)
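train_test_split shuffles at random on every call; passing random_state makes the split reproducible, as in this small sketch:
train_set, test_set = train_test_split(data, test_size=0.2, random_state=42)
print( train_set.shape, test_set.shape )  # (8, 5) (2, 5)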
from sklearn.model_selection import StratifiedShuffleSplit
X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
y = np.array([0, 0, 1, 1])
sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5)
sss.get_n_splits( X, y )
for train_index, test_index in sss.split(X, y):
    print( "train index : ", train_index, ", test index : ", test_index )
train index : [1 3] , test index : [0 2]
train index : [2 0] , test index : [3 1]
train index : [2 1] , test index : [3 0]
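Because the split is stratified, every test fold keeps the 50/50 class balance of y; a quick check (np.bincount counts the labels per fold):
for train_index, test_index in sss.split(X, y):
    print( np.bincount(y[test_index]) )  # expect [1 1] in each split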
import pandas as pd
import os
csv_path = os.path.join("./datasets/housing", "housing.csv")
housing = pd.read_csv( csv_path )
housing.info()
from sklearn.preprocessing import Imputer
imputer = Imputer( strategy="median" )
housing_num = housing.drop( "ocean_proximity", axis=1 )
# housing_num.info()
imputer.fit( housing_num )
# the returned X is an np.ndarray
X = imputer.transform( housing_num )
# convert the data back into a pandas DataFrame
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
housing_tr.info()
RangeIndex: 20640 entries, 0 to 20639
Data columns (total 10 columns):
longitude 20640 non-null float64
latitude 20640 non-null float64
housing_median_age 20640 non-null float64
total_rooms 20640 non-null float64
total_bedrooms 20433 non-null float64
population 20640 non-null float64
households 20640 non-null float64
median_income 20640 non-null float64
median_house_value 20640 non-null float64
ocean_proximity 20640 non-null object
dtypes: float64(9), object(1)
memory usage: 1.6+ MB
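A quick hedged check that the imputer learned the per-column medians:
print( imputer.statistics_ )
print( housing_num.median().values )  # should match statistics_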
LabelEncoder maps each category to a unique integer code. OneHotEncoder then performs one-hot encoding: an N x 1 column of category codes is converted into an N x m matrix, where m is the number of categories. Its input here is the array produced by LabelEncoder above, and its output is a scipy sparse matrix; the toarray() method converts it to an np.ndarray. LabelBinarizer combines both steps and by default returns a dense array.
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelBinarizer
encoder = LabelEncoder()
housing_cat = housing["ocean_proximity"]
housing_cat_encoded = encoder.fit_transform( housing_cat )
housing_cat_encoded
print( encoder.classes_ )
encoder = OneHotEncoder()
housing_cat = housing["ocean_proximity"]
housing_cat_1hot = encoder.fit_transform( housing_cat_encoded.reshape(-1,1) )
print(type(housing_cat_1hot))
print(housing_cat_1hot.toarray())
encoder = LabelBinarizer()
house_cat_1hot_lb = encoder.fit_transform( housing_cat )
print(house_cat_1hot_lb)
['<1H OCEAN' 'INLAND' 'ISLAND' 'NEAR BAY' 'NEAR OCEAN']
[[ 0. 0. 0. 1. 0.]
[ 0. 0. 0. 1. 0.]
[ 0. 0. 0. 1. 0.]
...,
[ 0. 1. 0. 0. 0.]
[ 0. 1. 0. 0. 0.]
[ 0. 1. 0. 0. 0.]]
[[0 0 0 1 0]
[0 0 0 1 0]
[0 0 0 1 0]
...,
[0 1 0 0 0]
[0 1 0 0 0]
[0 1 0 0 0]]
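LabelBinarizer stores the learned category order in classes_ and can invert the encoding; a small sketch:
print( encoder.classes_ )
print( encoder.inverse_transform(house_cat_1hot_lb[:3]) )  # first three rows back to strings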
Pipelines can be chained together to form a new pipeline, and every pipeline exposes a fit_transform method that produces the required data.
First, create a transformer that also adds a few extra attributes:
from sklearn.base import BaseEstimator, TransformerMixin
# column index
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    def __init__(self, add_bedrooms_per_room=True):  # no *args or **kargs
        self.add_bedrooms_per_room = add_bedrooms_per_room
    def fit(self, X, y=None):
        return self  # nothing else to do
    def transform(self, X, y=None):
        rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
        population_per_household = X[:, population_ix] / X[:, household_ix]
        if self.add_bedrooms_per_room:
            bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
            return np.c_[X, rooms_per_household, population_per_household,
                         bedrooms_per_room]
        else:
            return np.c_[X, rooms_per_household, population_per_household]
from sklearn.base import BaseEstimator, TransformerMixin
# Create a class to select numerical or categorical columns
# since Scikit-Learn doesn't handle DataFrames yet
class DataFrameSelector(BaseEstimator, TransformerMixin):
    def __init__(self, attribute_names):
        self.attribute_names = attribute_names
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        return X[self.attribute_names].values
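A minimal usage sketch: the selector pulls the named columns out of a DataFrame as a plain ndarray, so downstream scikit-learn steps never see a DataFrame.
selector = DataFrameSelector(["median_income", "households"])
print( selector.fit_transform(housing)[:3] )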
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
# Definition of the CategoricalEncoder class, copied from PR #9151.
# Just run this cell, or copy it to your code, do not try to understand it (yet).
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.preprocessing import LabelEncoder
from scipy import sparse
class CategoricalEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical features as a numeric array.

    The input to this transformer should be a matrix of integers or strings,
    denoting the values taken on by categorical (discrete) features.
    The features can be encoded using a one-hot aka one-of-K scheme
    (``encoding='onehot'``, the default) or converted to ordinal integers
    (``encoding='ordinal'``).
    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.

    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.

    Parameters
    ----------
    encoding : str, 'onehot', 'onehot-dense' or 'ordinal'
        The type of encoding to use (default is 'onehot'):

        - 'onehot': encode the features using a one-hot aka one-of-K scheme
          (or also called 'dummy' encoding). This creates a binary column for
          each category and returns a sparse matrix.
        - 'onehot-dense': the same as 'onehot' but returns a dense array
          instead of a sparse matrix.
        - 'ordinal': encode the features as ordinal integers. This results in
          a single column of integers (0 to n_categories - 1) per feature.

    categories : 'auto' or a list of lists/arrays of values.
        Categories (unique values) per feature:

        - 'auto' : Determine categories automatically from the training data.
        - list : ``categories[i]`` holds the categories expected in the ith
          column. The passed categories are sorted before encoding the data
          (used categories can be found in the ``categories_`` attribute).

    dtype : number type, default np.float64
        Desired dtype of output.

    handle_unknown : 'error' (default) or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature
        is present during transform (default is to raise). When this parameter
        is set to 'ignore' and an unknown category is encountered during
        transform, the resulting one-hot encoded columns for this feature
        will be all zeros.

        Ignoring unknown categories is not supported for
        ``encoding='ordinal'``.

    Attributes
    ----------
    categories_ : list of arrays
        The categories of each feature determined during fitting. When
        categories were specified manually, this holds the sorted categories
        (in order corresponding with output of `transform`).

    Examples
    --------
    Given a dataset with three features and two samples, we let the encoder
    find the maximum value per feature and transform the data to a binary
    one-hot encoding.

    >>> from sklearn.preprocessing import CategoricalEncoder
    >>> enc = CategoricalEncoder(handle_unknown='ignore')
    >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
    ... # doctest: +ELLIPSIS
    CategoricalEncoder(categories='auto', dtype=<... 'numpy.float64'>,
              encoding='onehot', handle_unknown='ignore')
    >>> enc.transform([[0, 1, 1], [1, 0, 4]]).toarray()
    array([[ 1.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.]])

    See also
    --------
    sklearn.preprocessing.OneHotEncoder : performs a one-hot encoding of
      integer ordinal features. The ``OneHotEncoder`` assumes that input
      features take on values in the range ``[0, max(feature)]`` instead of
      using the unique values.
    sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
      dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
      encoding of dictionary items or strings.
    """

    def __init__(self, encoding='onehot', categories='auto', dtype=np.float64,
                 handle_unknown='error'):
        self.encoding = encoding
        self.categories = categories
        self.dtype = dtype
        self.handle_unknown = handle_unknown

    def fit(self, X, y=None):
        """Fit the CategoricalEncoder to X.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to determine the categories of each feature.

        Returns
        -------
        self
        """
        if self.encoding not in ['onehot', 'onehot-dense', 'ordinal']:
            template = ("encoding should be either 'onehot', 'onehot-dense' "
                        "or 'ordinal', got %s")
            raise ValueError(template % self.encoding)
        if self.handle_unknown not in ['error', 'ignore']:
            template = ("handle_unknown should be either 'error' or "
                        "'ignore', got %s")
            raise ValueError(template % self.handle_unknown)
        if self.encoding == 'ordinal' and self.handle_unknown == 'ignore':
            raise ValueError("handle_unknown='ignore' is not supported for"
                             " encoding='ordinal'")

        X = check_array(X, dtype=np.object, accept_sparse='csc', copy=True)
        n_samples, n_features = X.shape

        self._label_encoders_ = [LabelEncoder() for _ in range(n_features)]

        for i in range(n_features):
            le = self._label_encoders_[i]
            Xi = X[:, i]
            if self.categories == 'auto':
                le.fit(Xi)
            else:
                valid_mask = np.in1d(Xi, self.categories[i])
                if not np.all(valid_mask):
                    if self.handle_unknown == 'error':
                        diff = np.unique(Xi[~valid_mask])
                        msg = ("Found unknown categories {0} in column {1}"
                               " during fit".format(diff, i))
                        raise ValueError(msg)
                le.classes_ = np.array(np.sort(self.categories[i]))

        self.categories_ = [le.classes_ for le in self._label_encoders_]
        return self

    def transform(self, X):
        """Transform X using one-hot encoding.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to encode.

        Returns
        -------
        X_out : sparse matrix or a 2-d array
            Transformed input.
        """
        X = check_array(X, accept_sparse='csc', dtype=np.object, copy=True)
        n_samples, n_features = X.shape
        X_int = np.zeros_like(X, dtype=np.int)
        X_mask = np.ones_like(X, dtype=np.bool)

        for i in range(n_features):
            valid_mask = np.in1d(X[:, i], self.categories_[i])
            if not np.all(valid_mask):
                if self.handle_unknown == 'error':
                    diff = np.unique(X[~valid_mask, i])
                    msg = ("Found unknown categories {0} in column {1}"
                           " during transform".format(diff, i))
                    raise ValueError(msg)
                else:
                    # Set the problematic rows to an acceptable value and
                    # continue. The rows are marked in `X_mask` and will be
                    # removed later.
                    X_mask[:, i] = valid_mask
                    X[:, i][~valid_mask] = self.categories_[i][0]
            X_int[:, i] = self._label_encoders_[i].transform(X[:, i])

        if self.encoding == 'ordinal':
            return X_int.astype(self.dtype, copy=False)

        mask = X_mask.ravel()
        n_values = [cats.shape[0] for cats in self.categories_]
        n_values = np.array([0] + n_values)
        indices = np.cumsum(n_values)

        column_indices = (X_int + indices[:-1]).ravel()[mask]
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)[mask]
        data = np.ones(n_samples * n_features)[mask]
        out = sparse.csc_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if self.encoding == 'onehot-dense':
            return out.toarray()
        else:
            return out
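Before wiring it into a pipeline, a small standalone sketch of the encoder on ocean_proximity (note it expects 2-D input, hence the double brackets):
cat_encoder = CategoricalEncoder(encoding="onehot-dense")
housing_cat_demo = cat_encoder.fit_transform( housing[["ocean_proximity"]] )
print( cat_encoder.categories_ )
print( housing_cat_demo[:3] )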
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
num_attribs = list( housing_num )
cat_attribs = ["ocean_proximity"]
num_pipeline = Pipeline([
    ('selector', DataFrameSelector(num_attribs)),
    ('imputer', Imputer(strategy="median")),
    ('attribs_adder', CombinedAttributesAdder()),
    ('std_scaler', StandardScaler()),
])
cat_pipeline = Pipeline([
    ('selector', DataFrameSelector(cat_attribs)),
    ('cat_encoder', CategoricalEncoder(encoding="onehot-dense")),
])
full_pipeline = FeatureUnion(transformer_list=[
    ("num_pipeline", num_pipeline),
    ("cat_pipeline", cat_pipeline),
])
housing_prepared = full_pipeline.fit_transform(housing)
print( housing_prepared )
[[-1.32783522 1.05254828 0.98214266 ..., 0. 1. 0. ]
[-1.32284391 1.04318455 -0.60701891 ..., 0. 1. 0. ]
[-1.33282653 1.03850269 1.85618152 ..., 0. 1. 0. ]
...,
[-0.8237132 1.77823747 -0.92485123 ..., 0. 0. 0. ]
[-0.87362627 1.77823747 -0.84539315 ..., 0. 0. 0. ]
[-0.83369581 1.75014627 -1.00430931 ..., 0. 0. 0. ]]
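A quick sanity check on the width of the result: 9 numeric columns plus 3 engineered ratios plus 5 one-hot categories should give 17 features.
print( housing_prepared.shape )  # expected (20640, 17)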
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_predict
from sklearn.ensemble import RandomForestRegressor
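These imports point at the next step, model training. A minimal sketch under two assumptions: median_house_value is the target, and it currently sits (scaled) at column 8 of housing_prepared because it was left in num_attribs, so it must be removed to avoid leaking the label into the features.
y_label = housing["median_house_value"].values
X_feat = np.delete(housing_prepared, 8, axis=1)  # drop the (scaled) label column
lin_reg = LinearRegression()
preds = cross_val_predict(lin_reg, X_feat, y_label, cv=3)
print( np.sqrt(mean_squared_error(y_label, preds)) )  # cross-validated RMSE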
import numpy as np
a = 5
b = np.random.permutation(a)  # int argument: returns a permutation of np.arange(5)
a = np.arange(a)
c = np.random.permutation(a)  # array argument: returns a shuffled copy
np.random.shuffle(a)          # shuffles the array in place and returns None
print(a)
print(b)
print(c)
[1 3 2 4 0]
[4 0 3 2 1]
[4 3 0 2 1]
import pandas as pd
import os
csv_path = os.path.join("./datasets/housing", "housing.csv")
housing = pd.read_csv( csv_path )
print( housing.head() )
print( housing.info() )
print( housing["ocean_proximity"].value_counts() / len(housing) )
longitude latitude housing_median_age total_rooms total_bedrooms \
0 -122.23 37.88 41.0 880.0 129.0
1 -122.22 37.86 21.0 7099.0 1106.0
2 -122.24 37.85 52.0 1467.0 190.0
3 -122.25 37.85 52.0 1274.0 235.0
4 -122.25 37.85 52.0 1627.0 280.0
population households median_income median_house_value ocean_proximity
0 322.0 126.0 8.3252 452600.0 NEAR BAY
1 2401.0 1138.0 8.3014 358500.0 NEAR BAY
2 496.0 177.0 7.2574 352100.0 NEAR BAY
3 558.0 219.0 5.6431 341300.0 NEAR BAY
4 565.0 259.0 3.8462 342200.0 NEAR BAY
RangeIndex: 20640 entries, 0 to 20639
Data columns (total 10 columns):
longitude 20640 non-null float64
latitude 20640 non-null float64
housing_median_age 20640 non-null float64
total_rooms 20640 non-null float64
total_bedrooms 20433 non-null float64
population 20640 non-null float64
households 20640 non-null float64
median_income 20640 non-null float64
median_house_value 20640 non-null float64
ocean_proximity 20640 non-null object
dtypes: float64(9), object(1)
memory usage: 1.6+ MB
None
<1H OCEAN 0.442636
INLAND 0.317393
NEAR OCEAN 0.128779
NEAR BAY 0.110950
ISLAND 0.000242
Name: ocean_proximity, dtype: float64
%matplotlib inline initializes matplotlib's plotting environment so that figures render inline in the notebook.
%matplotlib inline
import matplotlib.pyplot as plt
housing.plot( kind="scatter", x="longitude", y="latitude", alpha=0.1 )
housing.plot( kind="scatter", x="longitude", y="latitude", alpha=0.4,
              s=housing["population"]/100, label="population",
              c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True )
plt.legend()
from pandas.tools.plotting import scatter_matrix
attributes = ["median_house_value", "median_income", "total_rooms",
"housing_median_age"]
scatter_matrix( housing[attributes], figsize=(12,8) )
# correlation coefficients between all pairs of attributes
corr_matrix = housing.corr()
print( corr_matrix )
type(corr_matrix)
longitude latitude housing_median_age total_rooms \
longitude 1.000000 -0.924664 -0.108197 0.044568
latitude -0.924664 1.000000 0.011173 -0.036100
housing_median_age -0.108197 0.011173 1.000000 -0.361262
total_rooms 0.044568 -0.036100 -0.361262 1.000000
total_bedrooms 0.069608 -0.066983 -0.320451 0.930380
population 0.099773 -0.108785 -0.296244 0.857126
households 0.055310 -0.071035 -0.302916 0.918484
median_income -0.015176 -0.079809 -0.119034 0.198050
median_house_value -0.045967 -0.144160 0.105623 0.134153
total_bedrooms population households median_income \
longitude 0.069608 0.099773 0.055310 -0.015176
latitude -0.066983 -0.108785 -0.071035 -0.079809
housing_median_age -0.320451 -0.296244 -0.302916 -0.119034
total_rooms 0.930380 0.857126 0.918484 0.198050
total_bedrooms 1.000000 0.877747 0.979728 -0.007723
population 0.877747 1.000000 0.907222 0.004834
households 0.979728 0.907222 1.000000 0.013033
median_income -0.007723 0.004834 0.013033 1.000000
median_house_value 0.049686 -0.024650 0.065843 0.688075
median_house_value
longitude -0.045967
latitude -0.144160
housing_median_age 0.105623
total_rooms 0.134153
total_bedrooms 0.049686
population -0.024650
households 0.065843
median_income 0.688075
median_house_value 1.000000
pandas.core.frame.DataFrame
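To rank the attributes by how strongly they correlate with the target, sort that column of the matrix:
print( corr_matrix["median_house_value"].sort_values(ascending=False) )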