Python 常用代码段 (算法工程师|数据分析|数学建模|大数据)持续更新中

主函数

# Script entry point: parse the two required command-line options.
if __name__ == "__main__":
    from argparse import ArgumentParser
    ap = ArgumentParser()
    # -s: integer section/record id to process (required)
    ap.add_argument('-s', '--sec_id', type=int, required=True)
    # -o: directory where results are written (required)
    ap.add_argument('-o', '--output_dir', type=str, required=True)
    args = ap.parse_args()

一、数据预处理

1.1 缺失值处理

缺失值检测与统计
按列

# Per-column missing-value counts (three equivalent ways):
df.isna().sum()
df.isnull().sum()  # isnull is an alias of isna
df.shape[0] - df.count()  # total rows minus non-NA rows per column

按行

# Per-row missing-value counts (three equivalent ways):
df.isna().sum(axis=1)
df.isnull().sum(axis=1)  # isnull is an alias of isna
df.shape[1] - df.count(axis=1)  # total columns minus non-NA cells per row

相关处理函数

df.dropna(axis=0, how='any', thresh=None, subset=None, inplace=False)

函数作用:删除含有空值的行或列
axis:维度,axis=0表示index行,axis=1表示columns列,默认为0
how:"all"表示这一行或列中的元素全部缺失(为nan)才删除这一行或列,"any"表示这一行或列中只要有元素缺失,就删除这一行或列
thresh:保留至少含有 thresh 个非缺失值的行或列;非缺失值个数不足 thresh 的行或列才会被删除。
subset:在某些列的子集中选择出现了缺失值的列删除,不在子集中的含有缺失值的列或行不会删除(由axis决定是行还是列)
inplace:新数据是存为副本还是直接在原数据上进行修改。

df.fillna(value=None, method=None, axis=None, inplace=False, limit=None, downcast=None)

函数作用: 填充缺失值
value: 需要用什么值去填充缺失值
axis: 确定填充维度,从行开始或是从列开始
method: ffill/pad:用缺失值前面的一个值代替缺失值,如果axis=1,那么就是横向的前面的值替换后面的缺失值,如果axis=0,那么则是上面的值替换下面的缺失值。backfill/bfill:用缺失值后面的一个值代替前面的缺失值。注意这个参数不能与value同时使用。
limit: 确定填充的个数,如果limit=2,则只填充两个缺失值。

1.2 异常值处理

使用四分位距来过滤异常值(检测出的异常值变成np.nan)

def deal_outliers(df, pro_name, time=1.5):
    """Replace IQR-rule outliers in column `pro_name` with np.nan.

    Parameters
    ----------
    df : pandas.DataFrame
        Feature dataframe; modified in place and also returned.
    pro_name : str
        Name of the column whose outliers should be masked.
    time : float, default 1.5
        Values farther than `time` * IQR outside [Q1, Q3] are outliers.
    """
    # NOTE: the original mixed tabs and spaces, which raises TabError in
    # Python 3; re-indented consistently with spaces.
    Q1 = df[pro_name].quantile(0.25)
    Q3 = df[pro_name].quantile(0.75)
    IQR = Q3 - Q1
    outlier_mask = (df[pro_name] < (Q1 - time * IQR)) | (df[pro_name] > (Q3 + time * IQR))
    # Alternative (commented out in the original): drop the offending rows.
    # df = df[~outlier_mask]
    # Bug fix: the original `df[mask] = np.nan` blanked EVERY column of the
    # outlier rows; only the inspected column should be masked.
    df.loc[outlier_mask, pro_name] = np.nan
    return df

1.3 数据transformation

标准化standardization

from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()  # zero-mean / unit-variance standardisation
scaler.fit(X)  # learn per-feature mean and std from X
n_X = scaler.transform(X)  # apply the learned scaling

归一化normalization

二、模型

2.1 划分训练集和测试集

from sklearn.model_selection import train_test_split
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, 
        test_size=0.3, random_state=0)

2.2 交叉验证

一次划分(one fold)

def one_fold(X, y, model, randomState, testFraction=0.2):
    """Train and evaluate `model` on a single random train/test split.

    Parameters: X/y data, an unfitted estimator, the split seed, and the
    test fraction (default 0.2).  Returns the evaluation metrics plus the
    fitted model: (mae, mse, r2, p_cor, rho, tau, model).
    """
    X_train, X_test, y_train, y_test = train_test_split(X, y, 
        test_size=testFraction, random_state=randomState)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    # Bug fix: model_evaluation returns SEVEN values
    # (rmse, mse, mae, r2, p_cor, rho, tau); the original unpacked only
    # six, which raises ValueError at runtime.
    rmse, mse, mae, r2, p_cor, rho, tau = model_evaluation(y_test, y_pred)
    return mae, mse, r2, p_cor, rho, tau, model

k-折交叉验证(k-fold cross validation)

专门用KFold配合,是希望每次交叉验证的数据能够统一; 配合cross_val_predict使用,直接输出预测值,方便做各种评估

from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_predict
# Fixed, shuffled 5-fold split so every run uses identical folds.
kf = KFold(n_splits=5, random_state=0, shuffle=True)
# Out-of-fold prediction for every sample; `m` is the estimator to evaluate.
y_pred = cross_val_predict(m, X, y, cv=kf, method='predict', n_jobs=-1)

2.3 调参(tune hyperparameters)

手动grid search

# Manual grid search: enumerate every parameter combination ourselves.
# NOTE(review): needs `import itertools` and a `model` class in scope —
# neither is shown in this snippet; confirm before running.
param_grid = {
    
      
    'l1_ratio': [0.001, 0.01, 0.1, 0.5],
    'alpha': [0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 1],
    'random_state': [0]
}
# Cartesian product of all value lists -> one dict per combination.
all_params = [dict(zip(param_grid.keys(), v)) for v in itertools.product(*param_grid.values())]
for param in all_params:
	m = model(**param)

使用grid search函数(以回归树为例)

from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
# 网格化搜索最优参数
def grid_search(clf, grid_param, X, y, reg=False):
    """Return the best hyper-parameters found by 5-fold GridSearchCV.

    Scoring is negative MAE when reg=True (regression) and ROC-AUC
    otherwise (classification).
    """
    scoring_metric = 'neg_mean_absolute_error' if reg else 'roc_auc'
    searcher = GridSearchCV(estimator=clf,
                            param_grid=grid_param,
                            scoring=scoring_metric,
                            cv=5,
                            n_jobs=-1)
    searcher.fit(X, y)
    # Hand back only the winning parameter combination.
    return searcher.best_params_

# 调参代码
# Hyper-parameter tuning for a regression tree.
def DT_reg(X, y):
    """Grid-search a DecisionTreeRegressor and return a regressor
    configured with the best parameters found."""
    depth_options = list(range(10, 50, 5)) + [None]
    search_space = {
        'max_features': ['auto', 'sqrt', 'log2', None],
        'max_depth': depth_options,
        'min_samples_split': [2, 5, 10],
        'min_samples_leaf': [1, 2, 4],
        'random_state': [0],
    }
    best = grid_search(DecisionTreeRegressor(), search_space, X, y, True)
    # Build the final model from the winning parameter set.
    return DecisionTreeRegressor(max_features=best['max_features'],
                                 max_depth=best['max_depth'],
                                 min_samples_split=best['min_samples_split'],
                                 min_samples_leaf=best['min_samples_leaf'],
                                 random_state=best['random_state'])
clf = DT_reg(X, y)  # best model found by grid search

2.4 模型评估

回归模型评估指标(metrics)

from scipy.stats import pearsonr, spearmanr, kendalltau
from sklearn import metrics
def model_evaluation(y_true, y_pred):
    """Compute common regression metrics for predictions vs. ground truth.

    Returns (rmse, mse, mae, r2, p_cor, rho, tau), where p_cor, rho and
    tau are the Pearson, Spearman and Kendall correlations respectively.
    """
    mae = metrics.mean_absolute_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)
    # Bug fix: the original computed RMSE from an undefined name `y_hat`
    # (NameError at runtime); RMSE is simply the square root of the MSE.
    rmse = np.sqrt(mse)
    r2 = metrics.r2_score(y_true, y_pred)
    p_cor, pval = pearsonr(y_true, y_pred)   # Pearson correlation
    rho, pval = spearmanr(y_true, y_pred)    # Spearman rank correlation
    tau, pval = kendalltau(y_true, y_pred)   # Kendall's tau
    return rmse, mse, mae, r2, p_cor, rho, tau
rmse, mse, mae, r2, p_cor, rho, tau = model_evaluation(y_test, y_pred)

分类模型评估指标(metrics)

2.5 分类模型调用

下面的函数会用到这个 grid search

def grid_search(clf, grid_param, X, y, reg=False):
    """Return the best hyper-parameters found by 5-fold GridSearchCV.

    Scoring is negative MAE when reg=True (regression) and ROC-AUC
    otherwise (classification).
    """
    if reg:
        measure = 'neg_mean_absolute_error'
    else:
        measure = 'roc_auc'
    gs = GridSearchCV(estimator = clf,
                      param_grid = grid_param,
                      scoring = measure,
                      cv = 5,
                      # Consistency fix: use all available cores (-1), like
                      # the other grid_search in this file, instead of a
                      # hard-coded 40 workers that oversubscribes most hosts.
                      n_jobs = -1)
    gs.fit(X, y)
    params = gs.best_params_
    return params

Stacking分类模型

from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import StackingClassifier

# Class-weight mapping shared by the classifiers below: both classes
# contribute equally (0.5 each).
weight = {0: 0.5, 1: 0.5}
# get a stacking ensemble of models
# get a stacking ensemble of models
def get_stacking():
    """Build a StackingClassifier: nine base learners blended by a
    logistic-regression meta learner with 5-fold internal CV."""
    # Base (level-0) learners, in the same order as the original recipe.
    base_learners = [
        ('lr', LogisticRegression(class_weight=weight)),
        ('knn', KNeighborsClassifier()),
        ('cart', DecisionTreeClassifier(class_weight=weight)),
        # probability=True so the stacker can use class probabilities.
        ('svm', SVC(class_weight=weight, gamma='scale', probability=True)),
        ('NB', GaussianNB()),
        ('Ada', AdaBoostClassifier()),
        ('Bag', BaggingClassifier()),
        ('RF', RandomForestClassifier(class_weight=weight)),
        ('Ext', ExtraTreesClassifier(class_weight=weight)),
    ]
    # Meta (level-1) learner.
    meta_learner = LogisticRegression(class_weight=weight)
    return StackingClassifier(estimators=base_learners,
                              final_estimator=meta_learner, cv=5)
    
from sklearn.datasets import make_classification
# Synthetic binary-classification demo data: 1000 samples, 20 features
# (15 informative, 5 redundant), fixed seed.
def get_dataset():
	X, y = make_classification(n_samples=1000, n_features=20, n_informative=15, n_redundant=5, random_state=1)
	return X, y
X, y = get_dataset()  # build the demo dataset
model = get_stacking()  # stacking classifier defined above
model.fit(X,y)  # train the whole ensemble

Logistic regression

def LR(X, y):
    """Grid-search a LogisticRegression classifier and return one
    configured with the best parameters found."""
    clf = LogisticRegression(solver='liblinear', n_jobs=1)
    # Bug fix: the original grid crossed penalty='l1' with solvers that
    # only support l2 ('newton-cg', 'lbfgs', 'sag'), and its max_iter range
    # started at 0 (invalid).  Restrict to solvers that accept both
    # penalties and to positive iteration counts so no CV fit fails.
    grid_param  = {
        'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
        'penalty': ['l1', 'l2'],
        'random_state': list(range(10)),
        'max_iter': list(range(50, 550, 50)),
        'solver': ['liblinear', 'saga']  # both support l1 and l2
        }
    params = grid_search(clf, grid_param, X, y)
    return LogisticRegression(C = params['C'],
                              penalty = params['penalty'],
                              random_state = params['random_state'],
                              max_iter = params['max_iter'],
                              solver = params['solver'], n_jobs=1)

决策树

def DT(X, y):
    """Grid-search a DecisionTreeClassifier and return a tree configured
    with the best parameters found."""
    depth_options = list(range(10, 110, 5)) + [None]
    search_space = {
        'max_features': ['auto', 'sqrt', 'log2', None],
        'max_depth': depth_options,
        'min_samples_split': [2, 5, 10],
        'min_samples_leaf': [1, 2, 4],
        'random_state': list(range(0, 50, 5)),
    }
    best = grid_search(DecisionTreeClassifier(), search_space, X, y)
    # Build the final classifier from the winning parameter set.
    return DecisionTreeClassifier(max_features=best['max_features'],
                                  max_depth=best['max_depth'],
                                  min_samples_split=best['min_samples_split'],
                                  min_samples_leaf=best['min_samples_leaf'],
                                  random_state=best['random_state'])

支持向量机

def SVM(X, y):
    """Grid-search an RBF-kernel SVC and return one configured with the
    best C/gamma found (probability estimates enabled)."""
    clf = SVC(probability=True)
    # Bug fix: the original grid listed 0.10 and 0.1 — the same value —
    # wasting a full CV pass, while 0.01 was missing from the sequence;
    # the duplicate was evidently a typo for 0.01.
    C = [0.001, 0.01, 0.1, 1, 10, 25, 50, 100, 1000]
    gamma = [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5]
    grid_param = {
        'C': C,
        'kernel': ['rbf'],  # alternatives: 'poly', 'linear', 'sigmoid', 'precomputed'
        'gamma': gamma}
    params = grid_search(clf, grid_param, X, y)
    return SVC(probability=True, 
               kernel = params['kernel'],
               C = params['C'], 
               gamma = params['gamma'],
               random_state = 42)

随机森林

def RF(X, y, reg=False):
    """Tune a random forest with RandomizedSearchCV and return a forest
    built from the best parameters.

    reg=True tunes a RandomForestRegressor (scored by negative MSE);
    otherwise a RandomForestClassifier (scored by ROC-AUC).
    """
    n_estimators = list(range(20, 1000, 10))
    max_features = ['auto', 'sqrt', 'log2', None]
    max_depth = list(range(10, 110, 5))
    max_depth.append(None)
    min_samples_split = [2, 5, 10]
    min_samples_leaf = [1, 2, 4]
    bootstrap = [True, False]
    random_grid = {'n_estimators': n_estimators,
                   'max_features': max_features,
                   'max_depth': max_depth,
                   'min_samples_split': min_samples_split,
                   'min_samples_leaf': min_samples_leaf,
                   'bootstrap': bootstrap}
    if reg:
        score = 'neg_mean_squared_error'
        rf = RandomForestRegressor(n_jobs=1)
    else:
        score = 'roc_auc'
        rf = RandomForestClassifier(n_jobs=1)
    rf_random = RandomizedSearchCV(estimator=rf,
                                   scoring=score,
                                   param_distributions=random_grid,
                                   n_iter=100, cv=5, verbose=2, random_state=42,
                                   # Consistency fix: -1 = all cores (the
                                   # original hard-coded 42 workers).
                                   n_jobs=-1)
    rf_random.fit(X, y)
    params = rf_random.best_params_
    # Bug fix: min_samples_split was tuned above but silently dropped from
    # the returned model in the original; pass it through like the rest.
    best_kwargs = dict(bootstrap=params['bootstrap'],
                       max_depth=params['max_depth'],
                       max_features=params['max_features'],
                       min_samples_split=params['min_samples_split'],
                       min_samples_leaf=params['min_samples_leaf'],
                       n_estimators=params['n_estimators'],
                       random_state=42,
                       n_jobs=1)
    if reg:
        return RandomForestRegressor(**best_kwargs)
    else:
        return RandomForestClassifier(**best_kwargs)

k-近邻

def KNN(X, y, reg=False):
    """Grid-search a k-nearest-neighbours model and return one configured
    with the best parameters found.

    X is Z-score standardised first (KNN is distance based).  reg=True
    tunes a KNeighborsRegressor, otherwise a KNeighborsClassifier.
    """
    X = Z_score(X)  # Z_score is defined elsewhere in this collection
    search_space = {
        'n_neighbors': list(range(1, 10)),
        'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'],
        'leaf_size': list(range(5, 50, 5)),
        'p': [1, 2],  # Minkowski power parameter
    }
    if reg:
        estimator = KNeighborsRegressor(n_jobs=1)
    else:
        estimator = KNeighborsClassifier(n_jobs=1)
    best = grid_search(estimator, search_space, X, y, reg)
    # Rebuild the winning model class with the tuned parameters.
    model_cls = KNeighborsRegressor if reg else KNeighborsClassifier
    return model_cls(n_neighbors=best['n_neighbors'],
                     algorithm=best['algorithm'],
                     leaf_size=best['leaf_size'],
                     p=best['p'],
                     n_jobs=1)

2.6 回归模型

Stacking回归模型

from sklearn.linear_model import LinearRegression, Ridge, Lasso, SGDRegressor
from sklearn.linear_model import ElasticNet
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import StackingRegressor
from matplotlib import pyplot

# get the dataset
def get_dataset():
    """Return a synthetic (X, y) regression dataset: 1000 samples,
    20 features (15 informative), Gaussian noise, fixed seed."""
    # Bug fix: make_regression is never imported anywhere in this file
    # (only make_classification is); import it locally so the snippet runs.
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=1000, n_features=20, n_informative=15, noise=0.1, random_state=1)
    return X, y

# get a stacking ensemble of models
def get_stacking():
    """Build a StackingRegressor: eleven base regressors blended by a
    linear-regression meta learner with 5-fold internal CV."""
    # Base (level-0) regressors, in the same order as the original recipe.
    base_models = [
        ('lr', LinearRegression()),
        ('SGD', SGDRegressor()),
        ('Ridge', Ridge()),
        ('Lasso', Lasso()),
        ('knn', KNeighborsRegressor()),
        ('cart', DecisionTreeRegressor()),
        ('svm', SVR()),
        ('Ada', AdaBoostRegressor()),
        ('Bag', BaggingRegressor()),
        ('RF', RandomForestRegressor()),
        ('Ext', ExtraTreesRegressor()),
    ]
    # Meta (level-1) learner.
    meta = LinearRegression()
    return StackingRegressor(estimators=base_models, final_estimator=meta, cv=5)

python面向对象编程

https://www.cnblogs.com/bigberg/p/7182741.html

python虚拟环境

# Install virtualenv, then create and use an isolated Python 3 environment.
sudo apt update
sudo apt install python3-dev python3-pip
sudo pip3 install -U virtualenv
# --system-site-packages lets the venv see globally installed packages.
virtualenv --system-site-packages -p python3 ./venv
source ./venv/bin/activate  # enter the environment
deactivate  # leave the environment

文件操作

列出目录下的内容

os.listdir(path)

列出目录下的文件

from os import listdir
from os.path import isfile, join, isdir
# Keep only regular files (directories are filtered out).
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]

列出目录下的文件夹

onlydirs = [f for f in listdir(mypath) if isdir(join(mypath, f))]

获取指定目录下的绝对路径

import glob
# The trailing * matches every entry in the directory; because the pattern
# is absolute, glob returns absolute paths.
mypath = "/home/zdx/Downloads/topoII/*"
print(glob.glob(mypath))

系统

查看和更改工作路径

import os
os.getcwd() # get the current working directory
os.chdir("/tmp/") # change the current working directory

在python中调用shell命令

from subprocess import call
# Bug fix: the original packed "-l ... -o ... -A checkhydrogens" into ONE
# argv element, so prepare_ligand4.py received it as a single bogus
# argument.  With shell=False each flag and value must be its own element.
call(['prepare_ligand4.py',
      '-l', '/tmp/ligand.mol2',
      '-o', '{0}/{1}.pdbqt'.format(library_PostPrepare_folder, mol_id),
      '-A', 'checkhydrogens'])

List

对list每个元素做同样的操作

ligand_name = list(map(fun, var))

Numpy

Pandas

创建一个dataframe

import pandas as pd
# Build a two-column DataFrame from plain Python lists.
a = [1, 2, 3]
b = [4, 5, 6]
data = {'A': a, 'B': b}
df = pd.DataFrame(data)

导出dataframe

df.to_csv(f_t, sep='\t', index=False, encoding='utf-8')

读取excel, csv

# index_col=0 uses the first column of the sheet as the row index.
df = pd.read_excel('topoII.true_label.xlsx', index_col=0) 
# Headerless CSV: supply the column names explicitly.
df = pd.read_csv(score_file, header = None, names = ['label', 'score'])

定位值的位置

df_t[df_t['label']==0].index.values.astype(int)

对每一行,每一列做同样的操作

df['ID'] = df['ID'].apply(extract_number)

删除某行某列

del df['column_name']

打乱数据框

df = df.sample(frac=1).reset_index(drop=True) # dataframe shuffle

排序

按某一列排序

df =  df.sort_values(by='col', ascending=False) # False 从大到小排序

索引

数字索引

df.iat[0,0]

按某一列的部分值索引

df.loc[df['col'].isin(values)]

新建索引,原先索引变成列

df = df.reset_index()

猜你喜欢

转载自blog.csdn.net/zdx1996/article/details/89395334