te')); return $arr; } // NOTE(review): truncated fragment — the enclosing function begins before this chunk; body left untouched.

/* Iterate over all threads belonging to a user.
 * @param $uid           user ID
 * @param int $page      page number
 * @param int $pagesize  records per page
 * @param bool $desc     sort order: TRUE = descending, FALSE = ascending
 * @param string $key    which column's value to use as the key of the returned array
 * @param array $col     which columns to select
 */
function thread_tid_find_by_uid($uid, $page = 1, $pagesize = 1000, $desc = TRUE, $key = 'tid', $col = array()) { if (empty($uid)) return array(); $orderby = TRUE == $desc ? -1 : 1; $arr = thread_tid__find($cond = array('uid' => $uid), array('tid' => $orderby), $page, $pagesize, $key, $col); return $arr; }

// Iterate tids under a forum; $fid may also be an array, e.g. $fid = array(1,2,3)
function thread_tid_find_by_fid($fid, $page = 1, $pagesize = 1000, $desc = TRUE) { if (empty($fid)) return array(); $orderby = TRUE == $desc ? -1 : 1; $arr = thread_tid__find($cond = array('fid' => $fid), array('tid' => $orderby), $page, $pagesize, 'tid', array('tid', 'verify_date')); return $arr; }

// Delete the index row(s) for $tid; returns FALSE when $tid is empty.
function thread_tid_delete($tid) { if (empty($tid)) return FALSE; $r = thread_tid__delete(array('tid' => $tid)); return $r; }

// Total thread count.
function thread_tid_count() { $n = thread_tid__count(); return $n; }

// Count a user's threads. Counts on a non-primary-key column: use with caution on large datasets.
function thread_uid_count($uid) { $n = thread_tid__count(array('uid' => $uid)); return $n; }

// Count a forum's threads. Counts on a non-primary-key column: use with caution on large datasets.
function thread_fid_count($fid) { $n = thread_tid__count(array('fid' => $fid)); return $n; }
?>javascript - Why are these fetch methods asynchronous? - Stack Overflow
最新消息:雨落星辰是一个专注网站SEO优化、网站SEO诊断、搜索引擎研究、网络营销推广、网站策划运营及站长类的自媒体原创博客

javascript - Why are these fetch methods asynchronous? - Stack Overflow

programmeradmin2浏览0评论

Fetch is the new Promise-based API for making network requests:

fetch('/')
  .then(response => console.log('status: ', response.status));

This makes sense to me - when we initiate a network call, we return a Promise which lets our thread carry on with other business. When the response is available, the code inside the Promise executes.

However, if I'm interested in the payload of the response, I do so via methods of the response, not properties:

  • arrayBuffer()
  • blob()
  • formData()
  • json()
  • text()

These methods return promises, and I'm unclear as to why.

fetch('/') //IO bound
  .then(response => response.json()); //We now have the response, so this operation is CPU bound - isn't it?
  .then(entity => console.log(entity.name));

Why would processing the response's payload return a promise - it's unclear to me why it should be an async operation.

Fetch is the new Promise-based API for making network requests:

fetch('https://www.everythingisawesome.com/')
  .then(response => console.log('status: ', response.status));

This makes sense to me - when we initiate a network call, we return a Promise which lets our thread carry on with other business. When the response is available, the code inside the Promise executes.

However, if I'm interested in the payload of the response, I do so via methods of the response, not properties:

  • arrayBuffer()
  • blob()
  • formData()
  • json()
  • text()

These methods return promises, and I'm unclear as to why.

fetch('https://www.everythingisawesome.com/') //IO bound
  .then(response => response.json()); //We now have the response, so this operation is CPU bound - isn't it?
  .then(entity => console.log(entity.name));

Why would processing the response's payload return a promise - it's unclear to me why it should be an async operation.

Share Improve this question edited Mar 16, 2022 at 14:24 VLAZ 29.1k9 gold badges62 silver badges84 bronze badges asked Aug 26, 2016 at 16:13 Mister EpicMister Epic 16.7k13 gold badges86 silver badges155 bronze badges 9
  • "it's unclear to me why it should be an async operation." Not certain what Question is? Reasons the decision was made to provide methods at Response object which return response in different formats using fetch()? Or reasons the process which returns different formats of response asynchronous? – guest271314 Commented Aug 29, 2016 at 1:26
  • "it's unclear to me why it should be an async operation" Why should it be a synchronous operation? – a better oliver Commented Aug 30, 2016 at 12:26
  • 1 Because it appears at first glance to be a CPU bound operation. – Mister Epic Commented Aug 30, 2016 at 13:12
  • 1 @ZuzEL What a toxic attitude. What you can do is to explain your view. That's how discussions work. – a better oliver Commented Oct 16, 2023 at 11:54
  • 1 @abetteroliver I don't normally talk like this, but all the things you said just make me give up before starting any discussion. To be as concise as possible: (a) nodejs exists (b) browsers have workers where you don't block UI (c) let me decide if I want to put all my resources into CPU intensive task or not. Plus the fact you fall into the infinite loop in your very first attempt to answer a question, quote "it's unclear to me why it should be an async operation" Why should it be a synchronous operation? makes my toxic attitude absolutely justified – ZuzEL Commented Oct 16, 2023 at 15:04
 |  Show 4 more comments

4 Answers 4

Reset to default 11 +100

Why are these fetch methods asynchronous?

The naïve answer is "because the specification says so"

  • The arrayBuffer() method, when invoked, must return the result of running consume body with ArrayBuffer.
  • The blob() method, when invoked, must return the result of running consume body with Blob.
  • The formData() method, when invoked, must return the result of running consume body with FormData.
  • The json() method, when invoked, must return the result of running consume body with JSON.
  • The text() method, when invoked, must return the result of running consume body with text.

Of course, that doesn't really answer the question because it leaves open the question of "Why does the spec say so?"

And this is where it gets complicated, because I'm certain of the reasoning, but I have no evidence from an official source to prove it. I'm going to attempt to explain the rationale to the best of my understanding, but be aware that everything after here should be treated largely as outside opinion.


When you request data from a resource using the fetch API, you have to wait for the resource to finish downloading before you can use it. This should be reasonably obvious. JavaScript uses asynchronous APIs to handle this behavior so that the work involved doesn't block other scripts, and—more importantly—the UI.

When the resource has finished downloading, the data might be enormous. There's nothing that prevents you from requesting a monolithic JSON object that exceeds 50MB.

What do you think would happen if you attempted to parse 50MB of JSON synchronously? It would block other scripts, and—more importantly—the UI.

Other programmers have already solved how to handle large amounts of data in a performant manner: Streams. In JavaScript, streams are implemented using an asynchronous API so that they don't block, and if you read the consume body details, it's clear that streams are being used to parse the data:

Let stream be body's stream if body is non-null, or an empty ReadableStream object otherwise.

Now, it's certainly possible that the spec could have defined two ways of accessing the data: one synchronous API meant for smaller amounts of data, and one asynchronous API for larger amounts of data, but this would lead to confusion and duplication.

Besides Ya Ain't Gonna Need It. Everything that can be expressed using synchronous code can be expressed in asynchronous code. The reverse is not true. Because of this, a single asynchronous API was created that could handle all use cases.

Because the content is not transferred until you start reading it. The headers come first.

Looking at the implementation here the operation of fetching json is CPU bound because the creation of the response, along with the body is done once the response promise is done. See the implementation of the json function

That being said, I think that is mostly a design concept so you can chain your promise handlers and only use a single error handler that kicks in, no matter in what stage the error happened.

Like this:

fetch('https://www.everythingisawesome.com/')
  .then(function(response) {
    return response.json()
  })
  .then(function(json) {
    console.log('parsed json', json)
  })
  .catch(function(ex) {
    console.log('parsing or loading failed', ex)
  })

The creation of the already resolved promises is implemented with a pretty low overhead. In the end it is not required to use a promise here, but it makes for better looking code that can be written. At least in my opinion.

After reading through the implementation of fetch, it seems that promises are used for a few reasons. For starters, json() relies on a FileReader to convert the response blob into text. FileReaders can't be used until the onload callback, so that's where the promise chain starts.

// Wrap a FileReader in a Promise: resolves with reader.result once the read
// completes, rejects with reader.error if the read fails.
function fileReaderReady(reader) {
  return new Promise((resolve, reject) => {
    reader.onload = () => resolve(reader.result)
    reader.onerror = () => reject(reader.error)
  })
}

From there, additional promises are used to encapsulate particular errors that might occur and propagating them up to the caller. For example, there are errors that can occur if the body has already be read once before, if the blob doesn't convert to text, and if the text doesn't convert to JSON. Promises are convenient here because any of these various errors will simply end up in the catch block of the caller.

So in conclusion, a promised based api is used for reading fetch responses because: 1. They rely on a FileReader which has to initialize itself asynchronously. 2. fetch would like to propagate a wide variety of errors that may occur in reading the body. Promises allow a uniform way to do this.

发布评论

评论列表(0)

  1. 暂无评论