
javascript - Webpack-dev-server proxy requests very slow - Stack Overflow


I am using webpack-dev-server proxy:

devServer: {
        proxy: {
            '/api': {
                target: 'http://mybackedn.url',
                changeOrigin: true
            }
        }
    }

Requests take too much time. The Chrome network panel shows this:

[screenshot: network waterfall with long grey (stalled) and light-grey (queueing) bars]

Why does this happen? How can I solve this problem?

asked Dec 23, 2016 at 16:30 by Sergey Tyupaev, edited Dec 23, 2016 at 16:45
  • Have you resolved the issue somehow, please? I have a similar problem. – VladoDemcak Commented Feb 23, 2018 at 21:13
  • Have you made any configuration changes on your DNS? It needs some time and then it works efficiently again. – Paraskevas Ntsounos Commented Jun 25, 2018 at 12:45
  • 2 Maybe related to this issue: github.com/webpack/webpack-dev-server/issues/161 (though I am not sure why anyone would bounty such an old question, since I doubt the OP will answer any questions on their problem) – Seblor Commented Jun 25, 2018 at 12:47
  • Is the graphic a filtered list of network activity? If so, it looks consistent with standard queuing for more than 6 connections from a single source. – lossleader Commented Jul 1, 2018 at 11:54
  • Can you attach a screenshot hovering over the waterfall graphs that you marked in the screenshot above? – karthik Commented Jul 1, 2018 at 13:17

3 Answers


The grey part of the request time graph is the stalled time, and the light-grey part (after the grey) is queueing time. You can see the same if you hover over the waterfall graphs. Here is what is causing the issue and what stalled time means:

Stalled/Blocking

Time the request spent waiting before it could be sent. This time is inclusive of any time spent in proxy negotiation. Additionally, this time will include when the browser is waiting for an already established connection to become available for re-use, obeying Chrome's maximum six TCP connection per origin rule.

(If you forget, Chrome has an "Explanation" link in the hover tooltip and under the "Timing" panel.)

Basically, the primary reason you will see this is because Chrome will only download 6 files per server at a time, and other requests will be stalled until a connection slot becomes available.

This isn't necessarily something that needs fixing, but one way to avoid the stalled state is to distribute the files across multiple domain names and/or servers, keeping CORS in mind if applicable to your needs; however, HTTP/2 is probably a better option going forward. Resource bundling (such as JS and CSS concatenation) can also help reduce the number of stalled connections.
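For example, webpack-dev-server 3.x and 4.x expose an http2 flag that serves the dev build over HTTP/2. This is only a sketch of that idea under that assumption, not something from the original answer, and it implies HTTPS with a self-signed certificate:

// webpack.config.js (sketch; assumes webpack-dev-server 3.x/4.x where the http2 option exists)
module.exports = {
  // ...
  devServer: {
    http2: true, // serve over HTTP/2; implies HTTPS with a self-signed certificate
    proxy: {
      '/api': {
        target: 'http://mybackedn.url',
        changeOrigin: true
      }
    }
  }
};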

Alternatively, you can de-prioritise the requests that take a long time and trigger them at the end, so that the rest of the requests don't wait for the slow runners.
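As a rough illustration (the endpoints here are hypothetical, not from the question), you could fire the critical requests first and only start the known-slow one after they have settled, so it does not occupy one of the six connection slots while everything else is queued:

// Hypothetical endpoints, for illustration only.
const critical = ['/api/user', '/api/menu', '/api/settings'].map((url) => fetch(url));

// Defer the slow request until the critical ones have settled,
// so it does not block them in Chrome's per-origin connection queue.
Promise.allSettled(critical).then(() => fetch('/api/slow-report'));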

I was facing similar issues where every proxied request took 5 seconds or more with a setup something like this:

"proxy": [
    {
      "context": [
        "/api",
      ],
      "target": "http://my-backend-server.local:1234",
      "secure": false
    }
  ]

And in the hosts file:

127.0.0.1    my-backend-server.local
127.0.0.1    some-other-hostname.local
127.0.0.1    a-few-more-of-these.local

When I changed the proxy to point to the IPv6 loopback address the problem went away. So like this:

"proxy": [
    {
      "context": [
        "/api",
      ],
      "target": "http://[::1]:1234",
      "secure": false
    }
  ]

To be able to use the actual hostname in the proxy configuration instead of the loopback address, I edited my hosts file to contain all hostname entries on a single line and point them to both IPv4 and IPv6 loopback addresses. So like this:

127.0.0.1    my-backend-server.local some-other-hostname.local a-few-more-of-these.local
::1          my-backend-server.local some-other-hostname.local a-few-more-of-these.local

Now the latency is gone and it works as expected.
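For comparison, here is a rough equivalent of that fix in the object-style devServer.proxy syntax used in the question; this is just a sketch, and the http://[::1]:1234 target is a placeholder for the backend above:

// webpack.config.js excerpt (sketch only)
devServer: {
    proxy: {
        '/api': {
            // point at the IPv6 loopback directly so no hostname resolution is involved
            target: 'http://[::1]:1234',
            changeOrigin: true,
            secure: false
        }
    }
}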

devServer: {
   proxy: {
      '/api': {
        target: 'http://mybackedn.url',
        changeOrigin: true,
        headers: {
          Connection: 'keep-alive' // adding this header can fix the issue
        }
      }
   }
}
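The Connection: keep-alive header lets the connection to the backend be reused instead of being re-established for every proxied request, which can remove per-request connection setup overhead. As an alternative sketch (my assumption, not part of the answer above), the devServer.proxy options are forwarded to node-http-proxy, which also accepts a keep-alive agent:

// Sketch: assumes the proxy options are passed through to node-http-proxy, which accepts an agent option.
const http = require('http');

module.exports = {
  // ...
  devServer: {
    proxy: {
      '/api': {
        target: 'http://mybackedn.url',
        changeOrigin: true,
        // reuse TCP connections to the backend instead of opening a new one per request
        agent: new http.Agent({ keepAlive: true })
      }
    }
  }
};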