Google Machine Learning: Feature Sets

Learning objective: create a set containing very few features that performs as well as a far more complex feature set.

A model with fewer features uses fewer resources and is easier to maintain. Let's see whether we can build a model on a minimal set of housing features that performs as well as one that uses every feature in the dataset.

from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("https://download.mlcc.google.cn/mledu-datasets/california_housing_train.csv", sep=",")
# Randomize the ordering of the data before splitting it into training and validation sets.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))
def preprocess_features(california_housing_dataframe):
    """Prepares input features from the California housing data set."""
    selected_features = california_housing_dataframe[[
        "latitude",
        "longitude",
        "housing_median_age",
        "total_rooms",
        "total_bedrooms",
        "population",
        "households",
        "median_income"
    ]]
    processed_features = selected_features.copy()
    # Create a synthetic feature: rooms per person.
    processed_features["rooms_per_person"] = (
        california_housing_dataframe["total_rooms"] /
        california_housing_dataframe["population"]
    )
    return processed_features
def preprocess_targets(california_housing_dataframe):
    """Prepares target features (i.e., labels) from the California housing data set."""
    output_targets = pd.DataFrame()
    # Scale the target to be in units of thousands of dollars.
    output_targets["median_house_value"] = (
        california_housing_dataframe["median_house_value"] / 1000.0
    )
    return output_targets
# Use the first 12,000 (of 17,000) examples for training and the last 5,000 for validation.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())

print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())

# The correlation matrix shows pairwise correlations: each feature against the
# target, and each feature against every other feature.
# Here, correlation is defined as the Pearson correlation coefficient:
# -1 means perfect negative correlation, 0 no correlation, and 1 perfect positive correlation.
correlation_dataframe = training_examples.copy()
correlation_dataframe["target"] = training_targets["median_house_value"]
correlation_dataframe.corr()
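
When reading the matrix, the column of interest is the one for target. As a convenience (an addition to the original exercise), the correlations with the target can be pulled out and sorted:

# Not part of the original exercise: rank features by their Pearson
# correlation with the target to spot promising candidates at a glance.
print(correlation_dataframe.corr()["target"].sort_values(ascending=False))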

def construct_feature_columns(input_features):
    """Construct a numeric TensorFlow feature column for each input feature."""
    return set([tf.feature_column.numeric_column(my_feature)
                for my_feature in input_features])

def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Feeds batches of (features, labels) from pandas data to the Estimator."""
    # Convert pandas data into a dict of numpy arrays.
    features = {key: np.array(value) for key, value in dict(features).items()}
    # from_tensor_slices slices the first dimension of the given tensors,
    # yielding one dataset element per example.
    ds = Dataset.from_tensor_slices((features, targets))
    ds = ds.batch(batch_size).repeat(num_epochs)
    # Shuffle the data, if specified.
    if shuffle:
        ds = ds.shuffle(10000)
    # Return the next batch of data.
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels
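
To make the from_tensor_slices comment concrete, here is a minimal, self-contained sketch (an addition to this post, not part of the original exercise) of how it slices tensors in TF 1.x graph mode; the demo_* names are purely illustrative:

# Illustrative only: slice a 3-example (features, targets) pair into batches of 2.
demo_features = {"x": np.array([1.0, 2.0, 3.0])}
demo_targets = np.array([10.0, 20.0, 30.0])
demo_ds = Dataset.from_tensor_slices((demo_features, demo_targets)).batch(2)
demo_next = demo_ds.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(demo_next))  # first batch: ({'x': array([1., 2.])}, array([10., 20.]))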



def train_model(
    learning_rate,
    steps,
    batch_size,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a linear regression model on the given features, printing and
  plotting training/validation RMSE per period; returns the trained regressor."""
  periods = 10
  steps_per_period = steps / periods

  # Create a linear regressor object.
  my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_regressor = tf.estimator.LinearRegressor(
      feature_columns=construct_feature_columns(training_examples),
      optimizer=my_optimizer
  )
    
  # Create input functions.
  training_input_fn = lambda: my_input_fn(training_examples, 
                                          training_targets["median_house_value"], 
                                          batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(training_examples, 
                                                  training_targets["median_house_value"], 
                                                  num_epochs=1, 
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples, 
                                                    validation_targets["median_house_value"], 
                                                    num_epochs=1, 
                                                    shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("RMSE (on training data):")
  training_rmse = []
  validation_rmse = []
  for period in range(0, periods):
    # Train the model, starting from the prior state.
    linear_regressor.train(
        input_fn=training_input_fn,
        steps=steps_per_period,
    )
    # Take a break and compute predictions.
    training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
    training_predictions = np.array([item['predictions'][0] for item in training_predictions])
    
    validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
    validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
    
    # Compute training and validation loss.
    training_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(training_predictions, training_targets))
    validation_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(validation_predictions, validation_targets))
    # Occasionally print the current loss.
    print("  period %02d : %0.2f" % (period, training_root_mean_squared_error))
    # Add the loss metrics from this period to our list.
    training_rmse.append(training_root_mean_squared_error)
    validation_rmse.append(validation_root_mean_squared_error)
  print("Model training finished.")

  
  # Output a graph of loss metrics over periods.
  plt.ylabel("RMSE")
  plt.xlabel("Periods")
  plt.title("Root Mean Squared Error vs. Periods")
  plt.tight_layout()
  plt.plot(training_rmse, label="training")
  plt.plot(validation_rmse, label="validation")
  plt.legend()

  return linear_regressor
# Take 5 minutes to search for a set of features and training parameters that
# works well, then check the solution to see which ones we chose. Bear in mind
# that different features may require different learning parameters.
minimal_features = [
  "median_income",
  "latitude",
]

minimal_training_examples = training_examples[minimal_features]
minimal_validation_examples = validation_examples[minimal_features]

_ = train_model(
    learning_rate=0.01,
    steps=500,
    batch_size=5,
    training_examples=minimal_training_examples,
    training_targets=training_targets,
    validation_examples=minimal_validation_examples,
    validation_targets=validation_targets)

plt.scatter(training_examples["latitude"], training_examples["median_income"])
# There is indeed no linear relationship between latitude and median_income,
# but there are a few peaks that roughly correspond to Los Angeles and San Francisco.

# zip() takes iterables as arguments, packs their corresponding elements into
# tuples, and returns the resulting sequence of tuples.
# Try creating synthetic features that make better use of latitude. For
# example, you could create a feature that maps latitude to |latitude - 38|
# and call it distance_from_san_francisco.
# Alternatively, you could divide the space into 10 buckets (latitude_32_to_33,
# latitude_33_to_34, etc.), with the value 1.0 if latitude falls within the
# bucket's range and 0.0 otherwise.
# Use the correlation matrix to guide the construction of synthetic features;
# if you find one that works well, add it to your model.
# What is the best validation performance you can get?
# (A quick sketch of zip() and the |latitude - 38| idea follows below.)
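
Here is a minimal sketch (an addition to this post) of both ideas: what zip() produces for the bucket edges, and the suggested |latitude - 38| feature. The distance_from_san_francisco name is illustrative, taken from the suggestion above:

# zip() pairs consecutive bucket edges: [(32, 33), (33, 34), ..., (43, 44)].
print(list(zip(range(32, 44), range(33, 45))))
# Suggested alternative synthetic feature: |latitude - 38| as a rough proxy
# for distance from San Francisco.
alternative_examples = training_examples.copy()
alternative_examples["distance_from_san_francisco"] = (
    training_examples["latitude"] - 38).abs()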
# Bucketize latitude into one-hot features.
# In Python 3, zip() returns a one-shot iterator, so materialize the bucket
# ranges with list() once; the same function can then be reused for both the
# training and validation sets.
LATITUDE_RANGES = list(zip(range(32, 44), range(33, 45)))

def select_and_transform_features(source_df):
  selected_examples = pd.DataFrame()
  selected_examples["median_income"] = source_df["median_income"]
  # One-hot bucket columns: 1.0 if latitude falls within [lower, upper), else 0.0.
  for r in LATITUDE_RANGES:
    selected_examples["latitude_%d_to_%d" % r] = source_df["latitude"].apply(
        lambda l: 1.0 if l >= r[0] and l < r[1] else 0.0)
  return selected_examples

selected_training_examples = select_and_transform_features(training_examples)
selected_validation_examples = select_and_transform_features(validation_examples)
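
As a quick sanity check (not part of the original exercise), every example should fall into exactly one latitude bucket, since California latitudes lie within [32, 44):

# Sum across the one-hot bucket columns; every row should sum to 1.0.
print(selected_training_examples.filter(like="latitude_").sum(axis=1).describe())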
_ = train_model(
    learning_rate=0.01,
    steps=500,
    batch_size=5,
    training_examples=selected_training_examples,
    training_targets=training_targets,
    validation_examples=selected_validation_examples,
    validation_targets=validation_targets)
