Android Networking 2: Volley (2) Source Code Walkthrough

Let's start with how Volley is used:

private void testStringRequest() {
    String url = "http://api.k780.com/?app=weather.history&weaid=1&date=2015-07-20&appkey=10003&sign=b59bc3ef6191eb9f747dd4e83c99f2a4&format=json";
    RequestQueue queue = Volley.newRequestQueue(getApplicationContext());
    StringRequest stringRequest = new StringRequest(Request.Method.GET, url, new Response.Listener<String>() {
        @Override
        public void onResponse(String response) {
            Log.i("response:", response);
        }
    }, new Response.ErrorListener() {
        @Override
        public void onErrorResponse(VolleyError error) {
            Log.e("error", error.getMessage());
        }
    }
    );
    queue.add(stringRequest);
}

The first step is to create a RequestQueue from a Context:

RequestQueue queue = Volley.newRequestQueue(getApplicationContext());

The corresponding source is:

public class Volley {

    /** Default on-disk cache directory. */
    private static final String DEFAULT_CACHE_DIR = "volley";

    /**
     * Creates a default instance of the worker pool and calls {@link RequestQueue#start()} on it.
     *
     * @param context A {@link Context} to use for creating the cache dir.
     * @param stack An {@link HttpStack} to use for the network, or null for default.
     * @return A started {@link RequestQueue} instance.
     */
    public static RequestQueue newRequestQueue(Context context, HttpStack stack) {
        File cacheDir = new File(context.getCacheDir(), DEFAULT_CACHE_DIR);

        String userAgent = "volley/0";
        try {
            String packageName = context.getPackageName();
            PackageInfo info = context.getPackageManager().getPackageInfo(packageName, 0);
            userAgent = packageName + "/" + info.versionCode;
        } catch (NameNotFoundException e) {
        }

        if (stack == null) {
            if (Build.VERSION.SDK_INT >= 9) {
                stack = new HurlStack();
            } else {
                // Prior to Gingerbread, HttpUrlConnection was unreliable.
                // See: http://android-developers.blogspot.com/2011/09/androids-http-clients.html
                stack = new HttpClientStack(AndroidHttpClient.newInstance(userAgent));
            }
        }

        Network network = new BasicNetwork(stack);

        RequestQueue queue = new RequestQueue(new DiskBasedCache(cacheDir), network);
        queue.start();

        return queue;
    }

    /**
     * Creates a default instance of the worker pool and calls {@link RequestQueue#start()} on it.
     *
     * @param context A {@link Context} to use for creating the cache dir.
     * @return A started {@link RequestQueue} instance.
     */
    public static RequestQueue newRequestQueue(Context context) {
        return newRequestQueue(context, null);
    }
}
The Volley class is very simple: only about 80 lines of code.

We call

public static RequestQueue newRequestQueue(Context context)

which in turn delegates to its overload

public static RequestQueue newRequestQueue(Context context, HttpStack stack)

so we only need to look at this overload.

The method first creates the default cache directory. It then reads the package name from the Context, asks the PackageManager for the corresponding PackageInfo, and concatenates the package name with its versionCode into a userAgent string. That string is only needed when HttpClientStack is created via AndroidHttpClient.newInstance(userAgent), so arguably the code could be tidied up by building userAgent after the stack check. That check uses Build.VERSION.SDK_INT >= 9 to decide between HttpURLConnection (HurlStack) and HttpClient (HttpClientStack); either way the chosen stack is wrapped in a Network for performing requests, and a disk cache is passed in alongside it:

Network network = new BasicNetwork(stack);
RequestQueue queue = new RequestQueue(new DiskBasedCache(cacheDir), network);
queue.start();
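Incidentally, if you want to bypass the SDK-version check and choose the stack yourself, the two-argument overload accepts it directly. A minimal sketch, assuming you simply want HttpURLConnection via HurlStack:

// Pass an explicit HttpStack so the SDK_INT check above is skipped.
RequestQueue queue = Volley.newRequestQueue(getApplicationContext(), new HurlStack());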

Next, let's look at the RequestQueue source; only the important parts are shown.

public class RequestQueue {

    // ... omitted ...

    /** Number of network request dispatcher threads to start. */
    private static final int DEFAULT_NETWORK_THREAD_POOL_SIZE = 4;

    /** Cache interface for retrieving and storing responses. */
    private final Cache mCache;

    /** Network interface for performing requests. */
    private final Network mNetwork;

    /** Response delivery mechanism. */
    private final ResponseDelivery mDelivery;

    /** The network dispatchers. */
    private NetworkDispatcher[] mDispatchers;

    /** The cache dispatcher. */
    private CacheDispatcher mCacheDispatcher;

    private List<RequestFinishedListener> mFinishedListeners =
            new ArrayList<RequestFinishedListener>();

    /**
     * Creates the worker pool. Processing will not begin until {@link #start()} is called.
     *
     * @param cache A Cache to use for persisting responses to disk
     * @param network A Network interface for performing HTTP requests
     * @param threadPoolSize Number of network dispatcher threads to create
     * @param delivery A ResponseDelivery interface for posting responses and errors
     */
    public RequestQueue(Cache cache, Network network, int threadPoolSize,
            ResponseDelivery delivery) {
        mCache = cache;
        mNetwork = network;
        mDispatchers = new NetworkDispatcher[threadPoolSize];
        mDelivery = delivery;
    }

    /**
     * Creates the worker pool. Processing will not begin until {@link #start()} is called.
     *
     * @param cache A Cache to use for persisting responses to disk
     * @param network A Network interface for performing HTTP requests
     * @param threadPoolSize Number of network dispatcher threads to create
     */
    public RequestQueue(Cache cache, Network network, int threadPoolSize) {
        this(cache, network, threadPoolSize,
                new ExecutorDelivery(new Handler(Looper.getMainLooper())));
    }

    /**
     * Creates the worker pool. Processing will not begin until {@link #start()} is called.
     *
     * @param cache A Cache to use for persisting responses to disk
     * @param network A Network interface for performing HTTP requests
     */
    public RequestQueue(Cache cache, Network network) {
        this(cache, network, DEFAULT_NETWORK_THREAD_POOL_SIZE);
    }

    /**
     * Starts the dispatchers in this queue.
     */
    public void start() {
        stop();  // Make sure any currently running dispatchers are stopped.
        // Create the cache dispatcher and start it.
        mCacheDispatcher = new CacheDispatcher(mCacheQueue, mNetworkQueue, mCache, mDelivery);
        mCacheDispatcher.start();

        // Create network dispatchers (and corresponding threads) up to the pool size.
        for (int i = 0; i < mDispatchers.length; i++) {
            NetworkDispatcher networkDispatcher = new NetworkDispatcher(mNetworkQueue, mNetwork,
                    mCache, mDelivery);
            mDispatchers[i] = networkDispatcher;
            networkDispatcher.start();
        }
    }

    /**
     * Stops the cache and network dispatchers.
     */
    public void stop() {
        if (mCacheDispatcher != null) {
            mCacheDispatcher.quit();
        }
        for (int i = 0; i < mDispatchers.length; i++) {
            if (mDispatchers[i] != null) {
                mDispatchers[i].quit();
            }
        }
    }

    // ... omitted ...

    /**
     * Adds a Request to the dispatch queue.
     * @param request The request to service
     * @return The passed-in request
     */
    public <T> Request<T> add(Request<T> request) {
        // Tag the request as belonging to this queue and add it to the set of current requests.
        request.setRequestQueue(this);
        synchronized (mCurrentRequests) {
            mCurrentRequests.add(request);
        }

        // Process requests in the order they are added.
        request.setSequence(getSequenceNumber());
        request.addMarker("add-to-queue");

        // If the request is uncacheable, skip the cache queue and go straight to the network.
        if (!request.shouldCache()) {
            mNetworkQueue.add(request);
            return request;
        }

        // Insert request into stage if there's already a request with the same cache key in flight.
        synchronized (mWaitingRequests) {
            String cacheKey = request.getCacheKey();
            if (mWaitingRequests.containsKey(cacheKey)) {
                // There is already a request in flight. Queue up.
                Queue<Request<?>> stagedRequests = mWaitingRequests.get(cacheKey);
                if (stagedRequests == null) {
                    stagedRequests = new LinkedList<Request<?>>();
                }
                stagedRequests.add(request);
                mWaitingRequests.put(cacheKey, stagedRequests);
                if (VolleyLog.DEBUG) {
                    VolleyLog.v("Request for cacheKey=%s is in flight, putting on hold.", cacheKey);
                }
            } else {
                // Insert 'null' queue for this cacheKey, indicating there is now a request in
                // flight.
                mWaitingRequests.put(cacheKey, null);
                mCacheQueue.add(request);
            }
            return request;
        }
    }

    
    // ... omitted ...
}

The add method

The add method itself only routes the request (StringRequest, JsonRequest, ImageRequest, ...) into either the network queue or the cache queue according to the conditions shown above; it does not perform any request. The actual work is done by the NetworkDispatcher and CacheDispatcher threads that consume those queues.
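For example, whether a request takes the cache path or goes straight to mNetworkQueue is controlled by shouldCache(); a minimal sketch of forcing a request onto the network path:

// StringRequest caches by default; disabling caching makes add() put it
// directly into mNetworkQueue (the !request.shouldCache() branch above).
stringRequest.setShouldCache(false);
queue.add(stringRequest);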

The start method

From the source above we can see what start does. CacheDispatcher and NetworkDispatcher are both dispatcher threads, i.e. subclasses of Thread: CacheDispatcher is the cache dispatcher and NetworkDispatcher is the network dispatcher. In the RequestQueue constructor the NetworkDispatcher array is created with DEFAULT_NETWORK_THREAD_POOL_SIZE = 4, so Volley has four network dispatcher threads by default. start() launches both kinds of dispatchers, which means Volley keeps five background threads waiting to process requests.
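If four network threads are not what you want, you can build the queue yourself with the three-argument constructor instead of going through Volley.newRequestQueue; a minimal sketch (the cache directory name is illustrative):

// Two network dispatcher threads instead of the default four.
File cacheDir = new File(context.getCacheDir(), "volley");
Network network = new BasicNetwork(new HurlStack());
RequestQueue queue = new RequestQueue(new DiskBasedCache(cacheDir), network, 2);
queue.start();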

RequestQueue.start() is called when Volley builds the queue in newRequestQueue; the relevant tail of that method is:

    RequestQueue queue = new RequestQueue(new DiskBasedCache(cacheDir), network);
    queue.start();

    return queue;

Thread: what is the difference between start() and run()?

Thread implements the Runnable interface; Runnable itself is trivial, containing only the single abstract method run.

So there are two ways to create a thread:

1) Subclass Thread.

2) Implement Runnable and override run, then construct a Thread and pass the Runnable in through the Thread constructor. The constructor calls init, which stores the Runnable in the Thread's member field target; when Thread.run() is eventually invoked, it simply calls target.run().
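A minimal sketch of option 1 (option 2 appears as the anonymous Runnable example a bit further down):

// Option 1: subclass Thread and override run().
Thread worker = new Thread() {
    @Override
    public void run() {
        // work assigned to this thread
    }
};
worker.start();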

So what does start do?

Translated from its Javadoc:

Calling start causes this thread to begin execution; the Java Virtual Machine calls this thread's run method.

The result is that two threads run concurrently: the current thread (which returns from the call to start) and the new thread (which executes its run method).

It is never legal to start a thread more than once; in particular, a thread may not be restarted once it has completed execution.

From that description we know start only creates and launches the thread; run is invoked by the JVM itself, which is why you will not find an explicit call to run anywhere in the source.

This lets us answer a common question:

What is the difference between start and run?

When start is called, the JVM creates a new thread in the runnable state; once the scheduler gives it CPU time, the thread begins executing and the VM invokes its run method. So the difference is that start creates a new thread, while run does not: calling run directly simply executes its body on the calling thread.
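A minimal sketch of the difference (the log tag and thread names are illustrative):

Thread t = new Thread(new Runnable() {
    @Override
    public void run() {
        Log.d("demo", "running on " + Thread.currentThread().getName());
    }
});
t.run();   // runs on the caller's thread (e.g. "main"); no new thread is created
t.start(); // creates a new thread (e.g. "Thread-1") and the VM calls run() on it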

One more question: how does the JVM actually get to call run? In start, nativeCreate(this, stackSize, daemon) hands this over to a native method. We cannot see the lower-level code here, but since the Thread object itself is passed down, the native layer can clearly reach its run method.

new Thread(new Runnable() {
    @Override
    public void run() {

    }
}).start();

############ Partial Thread source ############

/**
 * Allocates a new {@code Thread} object. This constructor has the same
 * effect as {@linkplain #Thread(ThreadGroup,Runnable,String) Thread}
 * {@code (null, target, gname)}, where {@code gname} is a newly generated
 * name. Automatically generated names are of the form
 * {@code "Thread-"+}<i>n</i>, where <i>n</i> is an integer.
 *
 * @param  target
 *         the object whose {@code run} method is invoked when this thread
 *         is started. If {@code null}, this classes {@code run} method does
 *         nothing.
 */
public Thread(Runnable target) {
    init(null, target, "Thread-" + nextThreadNum(), 0);
}
/**
 * Initializes a Thread.
 *
 * @param g the Thread group
 * @param target the object whose run() method gets called
 * @param name the name of the new Thread
 * @param stackSize the desired stack size for the new thread, or
 *        zero to indicate that this parameter is to be ignored.
 */
private void init(ThreadGroup g, Runnable target, String name, long stackSize) {
    Thread parent = currentThread();
    if (g == null) {
        g = parent.getThreadGroup();
    }

    g.addUnstarted();
    this.group = g;

    this.target = target;
    this.priority = parent.getPriority();
    this.daemon = parent.isDaemon();
    setName(name);

    init2(parent);

    /* Stash the specified stack size in case the VM cares */
    this.stackSize = stackSize;
    tid = nextThreadID();
}
/**
 * If this thread was constructed using a separate
 * <code>Runnable</code> run object, then that
 * <code>Runnable</code> object's <code>run</code> method is called;
 * otherwise, this method does nothing and returns.
 * <p>
 * Subclasses of <code>Thread</code> should override this method.
 *
 * @see     #start()
 * @see     #stop()
 * @see     #Thread(ThreadGroup, Runnable, String)
 */
@Override
public void run() {
    if (target != null) {
        target.run();
    }
}
/**
 * Causes this thread to begin execution; the Java Virtual Machine
 * calls the <code>run</code> method of this thread.
 * <p>
 * The result is that two threads are running concurrently: the
 * current thread (which returns from the call to the
 * <code>start</code> method) and the other thread (which executes its
 * <code>run</code> method).
 * <p>
 * It is never legal to start a thread more than once.
 * In particular, a thread may not be restarted once it has completed
 * execution.
 *
 * @exception  IllegalThreadStateException  if the thread was already
 *               started.
 * @see        #run()
 * @see        #stop()
 */
public synchronized void start() {
    /**
     * This method is not invoked for the main method thread or "system"
     * group threads created/set up by the VM. Any new functionality added
     * to this method in the future may have to also be added to the VM.
     *
     * A zero status value corresponds to state "NEW".
     */
    // Android-changed: throw if 'started' is true
    if (threadStatus != 0 || started)
        throw new IllegalThreadStateException();

    /* Notify the group that this thread is about to be started
     * so that it can be added to the group's list of threads
     * and the group's unstarted count can be decremented. */
    group.add(this);

    started = false;
    try {
        nativeCreate(this, stackSize, daemon);
        started = true;
    } finally {
        try {
            if (!started) {
                group.threadStartFailed(this);
            }
        } catch (Throwable ignore) {
            /* do nothing. If start0 threw a Throwable then
              it will be passed up the call stack */
        }
    }
}

CacheDispatcher

From the analysis above the call chain is clear: Volley.newRequestQueue creates the RequestQueue and calls its start method, which in turn calls start on the CacheDispatcher and the NetworkDispatchers, launching five threads. Once the CPU schedules a thread, it begins executing and the VM invokes its run method, so from here on we focus on the run method of CacheDispatcher; the same applies to NetworkDispatcher. Let's see what CacheDispatcher.run actually does.

Source:

@Override
public void run() {
    if (DEBUG) VolleyLog.v("start new dispatcher");
    Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND); // set this thread's priority to background

    // Make a blocking call to initialize the cache.
    mCache.initialize();

    while (true) {
        try {
            // Get a request from the cache triage queue, blocking until
            // at least one is available.
            final Request<?> request = mCacheQueue.take();
            request.addMarker("cache-queue-take");

            // If the request has been canceled, don't bother dispatching it.
            if (request.isCanceled()) {
                request.finish("cache-discard-canceled");
                continue;
            }

            // Attempt to retrieve this item from cache.
            Cache.Entry entry = mCache.get(request.getCacheKey());
            if (entry == null) {
                request.addMarker("cache-miss");
                // Cache miss; send off to the network dispatcher.
                mNetworkQueue.put(request);
                continue;
            }

            // If it is completely expired, just send it to the network.
            if (entry.isExpired()) {
                request.addMarker("cache-hit-expired");
                request.setCacheEntry(entry);
                mNetworkQueue.put(request);
                continue;
            }

            // We have a cache hit; parse its data for delivery back to the request.
            request.addMarker("cache-hit");
            Response<?> response = request.parseNetworkResponse(
                    new NetworkResponse(entry.data, entry.responseHeaders));
            request.addMarker("cache-hit-parsed");

            if (!entry.refreshNeeded()) {
                // Completely unexpired cache hit. Just deliver the response.
                mDelivery.postResponse(request, response);
            } else {
                // Soft-expired cache hit. We can deliver the cached response,
                // but we need to also send the request to the network for
                // refreshing.
                request.addMarker("cache-hit-refresh-needed");
                request.setCacheEntry(entry);

                // Mark the response as intermediate.
                response.intermediate = true;

                // Post the intermediate response back to the user and have
                // the delivery then forward the request along to the network.
                mDelivery.postResponse(request, response, new Runnable() {
                    @Override
                    public void run() {
                        try {
                            mNetworkQueue.put(request);
                        } catch (InterruptedException e) {
                            // Not much we can do about this.
                        }
                    }
                });
            }

        } catch (InterruptedException e) {
            // We may have been interrupted because it was time to quit.
            if (mQuit) {
                return;
            }
            continue;
        }
    }
}

Source analysis:

First the thread sets its own priority to background, then makes a blocking call to initialize the cache, and finally enters an infinite while loop. What matters is what that loop does:

1) Take a request from mCacheQueue; note that take() blocks if the queue is empty.

2) Check whether the request has been canceled; if it has, finish it and continue with the next iteration to take another request.

3) Try to retrieve the requested item from the cache via mCache.get(request.getCacheKey()). Looking at getCacheKey (listing below), by default it returns the request URL, so Volley keys its cache entries by the request URL; a sketch of customizing the key follows the listing.

/**
 * Returns the cache key for this request.  By default, this is the URL.
 */
public String getCacheKey() {
    return getUrl();
}
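Since the key is just the URL, two requests to the same URL share one cache entry. If that is not what you want, getCacheKey can be overridden in a Request subclass; a hypothetical sketch that also folds the HTTP method into the key (listener and errorListener are assumed to exist):

// Hypothetical: include the HTTP method so different methods against the
// same URL do not collide in the cache.
StringRequest request = new StringRequest(Request.Method.GET, url, listener, errorListener) {
    @Override
    public String getCacheKey() {
        return getMethod() + ":" + getUrl();
    }
};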

4) If an entry is found, check whether it has completely expired; if so, attach it to the request and hand the request over to the network queue so the NetworkDispatcher fetches fresh data.

5) If the entry has not expired, the cached resource matched the request's URL; this is what the comments call a cache hit.

6) Deliver the hit, with one more check: if the entry does not need refreshing, the parsed response is delivered directly; if the data source needs refreshing, the cached response is delivered as an intermediate result and the request is also forwarded to the network dispatcher to fetch fresh data.

7) The expiry checks are plain time comparisons. So where does softTtl get assigned? In com.android.volley.toolbox.HttpHeaderParser.parseCacheHeaders (source below): the cache lifetime is dictated by the response headers the server returns. With no-cache or no-store nothing is cached at all; otherwise max-age (together with stale-while-revalidate and Expires) determines the soft and hard TTLs. A worked example follows the listing.

public boolean refreshNeeded() {
    return this.softTtl < System.currentTimeMillis();
}
public static Cache.Entry parseCacheHeaders(NetworkResponse response) {
    long now = System.currentTimeMillis();

    Map<String, String> headers = response.headers;

    long serverDate = 0;
    long lastModified = 0;
    long serverExpires = 0;
    long softExpire = 0;
    long finalExpire = 0;
    long maxAge = 0;
    long staleWhileRevalidate = 0;
    boolean hasCacheControl = false;
    boolean mustRevalidate = false;

    String serverEtag = null;
    String headerValue;

    headerValue = headers.get("Date");
    if (headerValue != null) {
        serverDate = parseDateAsEpoch(headerValue);
    }

    headerValue = headers.get("Cache-Control");
    if (headerValue != null) {
        hasCacheControl = true;
        String[] tokens = headerValue.split(",");
        for (int i = 0; i < tokens.length; i++) {
            String token = tokens[i].trim();
            if (token.equals("no-cache") || token.equals("no-store")) {
                return null;
            } else if (token.startsWith("max-age=")) {
                try {
                    maxAge = Long.parseLong(token.substring(8));
                } catch (Exception e) {
                }
            } else if (token.startsWith("stale-while-revalidate=")) {
                try {
                    staleWhileRevalidate = Long.parseLong(token.substring(23));
                } catch (Exception e) {
                }
            } else if (token.equals("must-revalidate") || token.equals("proxy-revalidate")) {
                mustRevalidate = true;
            }
        }
    }

    headerValue = headers.get("Expires");
    if (headerValue != null) {
        serverExpires = parseDateAsEpoch(headerValue);
    }

    headerValue = headers.get("Last-Modified");
    if (headerValue != null) {
        lastModified = parseDateAsEpoch(headerValue);
    }

    serverEtag = headers.get("ETag");

    // Cache-Control takes precedence over an Expires header, even if both exist and Expires
    // is more restrictive.
    if (hasCacheControl) {
        softExpire = now + maxAge * 1000;
        finalExpire = mustRevalidate
                ? softExpire
                : softExpire + staleWhileRevalidate * 1000;
    } else if (serverDate > 0 && serverExpires >= serverDate) {
        // Default semantic for Expire header in HTTP specification is softExpire.
        softExpire = now + (serverExpires - serverDate);
        finalExpire = softExpire;
    }

    Cache.Entry entry = new Cache.Entry();
    entry.data = response.data;
    entry.etag = serverEtag;
    entry.softTtl = softExpire;
    entry.ttl = finalExpire;
    entry.serverDate = serverDate;
    entry.lastModified = lastModified;
    entry.responseHeaders = headers;

    return entry;
}
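A quick worked example of the math above (the header values are made up): with Cache-Control: max-age=300, stale-while-revalidate=60 and no must-revalidate, softExpire = now + 300 * 1000 and finalExpire = softExpire + 60 * 1000, so the entry is soft-expired after five minutes and fully expired one minute later. A minimal sketch:

// Feed a fake response through HttpHeaderParser to see the resulting TTLs.
Map<String, String> headers = new HashMap<String, String>();
headers.put("Date", "Mon, 20 Jul 2015 00:00:00 GMT");
headers.put("Cache-Control", "max-age=300, stale-while-revalidate=60");
NetworkResponse fake = new NetworkResponse("{}".getBytes(), headers);
Cache.Entry entry = HttpHeaderParser.parseCacheHeaders(fake);
// entry.softTtl ~ now + 300000 ms, entry.ttl ~ now + 360000 ms
Log.d("ttl", "softTtl=" + entry.softTtl + " ttl=" + entry.ttl);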

NetworkDispatcher

When does NetworkDispatcher.run start executing? That was covered above, so let's go straight to the analysis. It likewise opens an infinite while loop that keeps taking requests from the network queue:

1) Take a request from the queue.

2) If one is obtained, check whether it has been canceled (see the cancellation sketch after the source below).

3) On API 14 (ICE_CREAM_SANDWICH) and above, tag the thread with the request's traffic-stats tag. As the source comment explains, this tag is used when accounting for Socket traffic originating from the current thread, and only one active tag per thread is supported.

4) Perform the network request via mNetwork.performRequest(request).

5) If the server returned 304 (not modified) and this request has already had a response delivered, finish it without delivering a second, identical response.

6) Parse the server's response, here on the worker thread.

7) If the request allows caching and the response carries a cache entry, write it to the local cache.

8) Mark the request as delivered and post the response back.

Source:

@Override
public void run() {
    Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
    while (true) {
        long startTimeMs = SystemClock.elapsedRealtime();
        Request<?> request;
        try {
            // Take a request from the queue.
            request = mQueue.take();
        } catch (InterruptedException e) {
            // We may have been interrupted because it was time to quit.
            if (mQuit) {
                return;
            }
            continue;
        }

        try {
            request.addMarker("network-queue-take");

            // If the request was cancelled already, do not perform the
            // network request.
            if (request.isCanceled()) {
                request.finish("network-discard-cancelled");
                continue;
            }

            addTrafficStatsTag(request);

            // Perform the network request.
            NetworkResponse networkResponse = mNetwork.performRequest(request);
            request.addMarker("network-http-complete");

            // If the server returned 304 AND we delivered a response already,
            // we're done -- don't deliver a second identical response.
            if (networkResponse.notModified && request.hasHadResponseDelivered()) {
                request.finish("not-modified");
                continue;
            }

            // Parse the response here on the worker thread.
            Response<?> response = request.parseNetworkResponse(networkResponse);
            request.addMarker("network-parse-complete");

            // Write to cache if applicable.
            // TODO: Only update cache metadata instead of entire record for 304s.
            if (request.shouldCache() && response.cacheEntry != null) {
                mCache.put(request.getCacheKey(), response.cacheEntry);
                request.addMarker("network-cache-written");
            }

            // Post the response back.
            request.markDelivered();
            mDelivery.postResponse(request, response);
        } catch (VolleyError volleyError) {
            volleyError.setNetworkTimeMs(SystemClock.elapsedRealtime() - startTimeMs);
            parseAndDeliverNetworkError(request, volleyError);
        } catch (Exception e) {
            VolleyLog.e(e, "Unhandled exception %s", e.toString());
            VolleyError volleyError = new VolleyError(e);
            volleyError.setNetworkTimeMs(SystemClock.elapsedRealtime() - startTimeMs);
            mDelivery.postError(request, volleyError);
        }
    }
}
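As step 2 above shows, a cancelled request is finished with "network-discard-cancelled" before any network work is done. From the caller's side, cancellation is typically done in bulk via tags; a minimal sketch (the tag value is illustrative):

// Tag requests when adding them, then cancel them all, e.g. in onStop().
stringRequest.setTag("MainActivity");
queue.add(stringRequest);
// ... later ...
queue.cancelAll("MainActivity");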

That covers the main flow of the Volley source. If anyone would like a walk-through of the caching flow as well, please leave a comment.


Reposted from blog.csdn.net/weixin_36709064/article/details/81568771