Machine learning — loading the CIFAR dataset from pickle files

 

First, here is a Baidu Cloud mirror of the dataset for everyone to download (the official site is too slow):

Link: https://pan.baidu.com/s/1G0MxZIGSK_DyZTcuNbxraQ
Extraction code: ui51
Open this link in the Baidu Netdisk mobile app for more convenient access.

Now, the code:

def load_CIFAR10(ROOT):
    """Load the full CIFAR-10 dataset.

    Args:
        ROOT: directory containing the pickled batch files
            ('data_batch_1' ... 'data_batch_5' and 'test_batch').

    Returns:
        Xtr, Ytr, Xte, Yte: training images/labels and test images/labels.
        Xtr has shape (50000, 32, 32, 3) once all five batches are loaded.
    """
    xs = []
    ys = []
    # CIFAR-10 training data is split into five batches of 10000 images
    # each; iterate 1..5.  (The garbled original used range(1, 2), which
    # loads a single batch and contradicts its own 50000-row comment.)
    for b in range(1, 6):
        f = os.path.join(ROOT, 'data_batch_%d' % (b,))
        X, Y = load_CIFAR_batch(f)
        xs.append(X)  # collect every batch
        ys.append(Y)
    Xtr = np.concatenate(xs)  # stack batches: final Xtr shape (50000, 32, 32, 3)
    Ytr = np.concatenate(ys)
    del X, Y
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte

The CIFAR binary batch files live in the folder shown below:

Then the function that loads one batch file at a time:

def load_CIFAR_batch(filename):
    """Load a single pickled batch of the CIFAR-10 dataset.

    Args:
        filename: path to a pickled CIFAR-10 batch file.

    Returns:
        X: float ndarray of shape (N, 32, 32, 3), channels-last images.
        Y: ndarray of shape (N,) with the integer class labels.
    """
    # NOTE: pickle.load is unsafe on untrusted data; only use this with
    # the official CIFAR-10 download.
    with open(filename, 'rb') as f:
        datadict = p.load(f, encoding='latin1')
        X = datadict['data']
        Y = datadict['labels']
        # Rows are stored channel-major (3 x 32 x 32); reshape, then move
        # the channel axis last.  -1 accepts any batch size, not just 10000.
        X = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
        Y = np.array(Y)
        return X, Y

Test:

import numpy as np

# Load the CIFAR-10 dataset.
# NOTE(review): the path was garbled in the source scrape; adjust the
# separator/casing to match your local directory layout.
cifar10_dir = 'data/cifar10/cifar10-py-batches'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

# Sanity check: print the shape of each split.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
import pickle as p
import os


def load_CIFAR_batch(filename):
    """Load a single pickled batch of the CIFAR-10 dataset.

    Args:
        filename: path to a pickled CIFAR-10 batch file.

    Returns:
        X: float ndarray of shape (N, 32, 32, 3), channels-last images.
        Y: ndarray of shape (N,) with the integer class labels.
    """
    # NOTE: pickle.load is unsafe on untrusted data; only use this with
    # the official CIFAR-10 download.
    with open(filename, 'rb') as f:
        datadict = p.load(f, encoding='latin1')
        X = datadict['data']
        Y = datadict['labels']
        # The scraped original was syntactically broken here
        # (astype("float"= / Y)np.array).  Restore the reshape/transpose:
        # rows are channel-major (3 x 32 x 32); put the channel axis last.
        # -1 accepts any batch size, not just 10000.
        X = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
        Y = np.array(Y)
        return X, Y


def load_CIFAR10(ROOT):
    """Load the full CIFAR-10 dataset.

    Args:
        ROOT: directory containing the pickled batch files
            ('data_batch_1' ... 'data_batch_5' and 'test_batch').

    Returns:
        Xtr, Ytr, Xte, Yte: training images/labels and test images/labels.
        Xtr has shape (50000, 32, 32, 3) once all five batches are loaded.
    """
    xs = []
    ys = []
    # CIFAR-10 training data is split into five batches of 10000 images
    # each; iterate 1..5.  (The garbled original used range(1, 2), which
    # loads a single batch and contradicts its own 50000-row comment.)
    for b in range(1, 6):
        f = os.path.join(ROOT, 'data_batch_%d' % (b,))
        X, Y = load_CIFAR_batch(f)
        xs.append(X)  # collect every batch
        ys.append(Y)
    Xtr = np.concatenate(xs)  # stack batches: final Xtr shape (50000, 32, 32, 3)
    Ytr = np.concatenate(ys)
    del X, Y
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte

if __name__ == '__main__':
    import numpy as np

    # Load the CIFAR-10 dataset.
    # NOTE(review): the path was garbled in the source scrape; adjust the
    # separator/casing to match your local directory layout.
    cifar10_dir = 'data/cifar10/cifar10-py-batches'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Sanity check: print the shape of each split.
    print('Training data shape: ', X_train.shape)
    print('Training labels shape: ', y_train.shape)
    print('Test data shape: ', X_test.shape)
    print('Test labels shape: ', y_test.shape)

 

Guess you like

Origin www.cnblogs.com/TimVerion/p/11226189.html