Rust:是什么原因导致 hyper HTTP 响应被截断,而且难以重现?
我遇到一个错误:我的 hyper HTTP 响应被截断为特定大小(7829 字节),而使用 cURL 发出相同的请求则完全正常。 请求向 JSON 端点查询数据。随后因为使用了一个相对复杂的速率限制流程来同时发出许多这样的请求,对响应的处理结构做了大量调整。但是,即使只发出一个请求,响应仍然会被截断。 在实现速率限制和进行一些重大重构之前,程序能够正确处理这些响应。 我在下面做了一个最小示例,但它无法重现这个问题。此刻我不知道该从哪里查起:代码库相当复杂,迭代扩展复现示例非常困难,特别是在我不知道可能的原因时。 Hyper 的响应体可能会以哪些方式被截断?响应体是在下面的
handle
函数中获取响应主体
#![feature(use_nested_groups)]
extern crate futures;
extern crate hyper;
extern crate hyper_tls;
extern crate tokio_core;
use futures::{Future, Stream};
use hyper::{Body, Chunk, Client, Method, Request, Response};
use hyper_tls::HttpsConnector;
use tokio_core::reactor::Core;
use std::env;
fn main() {
let mut core = Core::new().unwrap();
let client = Client::configure()
.connector(HttpsConnector::new(4, &core.handle()).unwrap())
.build(&core.handle());
fn handle(response: Response<Body>) -> Box<Future<Item = usize, Error = hyper::Error>> {
Box::new(
response
.body()
.concat2()
.map(move |body: Chunk| -> usize { body.len() }),
)
}
let args: Vec<String> = env::args().collect();
let uri = &args[1];
let req = Request::new(Method::Get, uri.parse().unwrap());
let response_body_length = {
let future = Box::new(client.request(req).map(handle).flatten());
core.run(future).unwrap()
};
println!("response body length: {}", response_body_length);
}
#![特征(使用嵌套组)]
外部板条箱期货;
外部板条箱;
外部板条箱hyper_tls;
外部板条箱东京大学核心;
使用未来:{Future,Stream};
使用hyper::{Body,Chunk,Client,Method,Request,Response};
使用hyper_tls::HttpsConnector;
使用tokio_堆芯::反应堆::堆芯;
使用std::env;
fn main(){
让mut core=core::new().unwrap();
让client=client::configure()
.connector(HttpsConnector::new(4,&core.handle()).unwrap())
.build(&core.handle());
fn句柄(响应:响应)->框{
盒子:新的(
响应
.body()
.concat2()
.map(move | body:Chunk |->usize{body.len()}),
)
}
让args:Vec=env::args().collect();
让uri=&args[1];
让req=Request::new(方法::Get,uri.parse().unwrap());
让响应\正文\长度={
let future=Box::new(client.request(req).map(handle.flatte());
core.run(future.unwrap)()
};
println!(“响应体长度:{}”,响应体长度);
}
违规代码:
extern crate serde;
extern crate serde_json;
use futures::{future, stream, Future, Stream};
use hyper;
use hyper::{client, Body, Chunk, Client, Headers, Method, Request, Response, header::Accept,
header::Date as DateHeader, header::RetryAfter};
use hyper_tls::HttpsConnector;
use tokio_core::reactor::Core;
use models::Bucket;
use std::thread;
use std::time::{Duration, UNIX_EPOCH};
use std::str;
// Typed accessor for the `x-ratelimit-remaining` response header
// (hyper 0.11 `header!` macro; the raw value is kept as a String and
// parsed later in `RateLimitInfo::from`).
header! { (XRateLimitRemaining, "x-ratelimit-remaining") => [String] }
#[derive(Debug)]
// Newtype around a fully-formed request URL string.
struct Uri(pub String);
// Page size: number of buckets requested per API call (`count` parameter).
const MAX_REQ_SIZE: u32 = 500;
/// Builds the bucketed-trade query URL for `symbol`, page `page_ix`.
/// Each page holds `MAX_REQ_SIZE` buckets, fetched newest-first
/// (`reverse=true`), offset by `start`.
fn make_uri(symbol: &str, page_ix: u32) -> Uri {
    let start = MAX_REQ_SIZE * page_ix;
    let url = format!(
        "https://www.bitmex.com/api/v1/trade/bucketed?symbol={}&columns={}&partial=false&reverse=true&binSize={}&count={}&start={}",
        symbol, "close,timestamp", "5m", MAX_REQ_SIZE, start
    );
    Uri(url)
}
#[derive(Debug)]
// Rate-limit state read from the most recent API response.
struct RateLimitInfo {
    // Requests the server says we may still issue in the current window.
    remaining_reqs: u32,
    // Server-requested back-off, when a Retry-After delay header was present.
    retry_after: Option<Duration>,
}
impl RateLimitInfo {
    /// Initial state before any response has been seen: assume exactly one
    /// request may be issued immediately, with no back-off.
    fn default() -> RateLimitInfo {
        RateLimitInfo {
            remaining_reqs: 1,
            retry_after: None,
        }
    }

    /// Extracts rate-limit information from a response's headers.
    ///
    /// Panics if `x-ratelimit-remaining` is absent or is not a `u32` —
    /// both indicate the API broke its contract.
    fn from<T>(resp: &Response<T>) -> RateLimitInfo {
        let headers = resp.headers();
        let remaining_reqs = headers
            .get::<XRateLimitRemaining>()
            // was `.unwrap_or_else(|| panic!(…))` — `expect` is the idiomatic
            // form for a panic with a fixed message
            .expect("x-ratelimit-remaining not on request.")
            .parse()
            .expect("x-ratelimit-remaining did not parse as u32");
        // Only a Retry-After *delay* is honored; date-form values are ignored.
        let retry_after = match headers.get::<RetryAfter>() {
            Some(RetryAfter::Delay(duration)) => Some(*duration),
            _ => None,
        };
        RateLimitInfo {
            remaining_reqs,
            retry_after,
        }
    }
}
/// Of two responses, returns the one whose `Date` header is more recent
/// (ties go to `b`). Panics if either response lacks a `Date` header.
fn resp_dated_later<'a>(a: &'a Response<Body>, b: &'a Response<Body>) -> &'a Response<Body> {
    let date_of = |resp: &Response<Body>| **resp.headers().get::<DateHeader>().unwrap();
    if date_of(a) > date_of(b) {
        a
    } else {
        b
    }
}
#[derive(Debug)]
// One pending or completed API request: the URL to fetch, plus the
// response once (and if) one has been received.
struct Query {
    uri: Uri,
    response: Option<Response<Body>>,
}
impl Query {
    /// Wraps a URI into a not-yet-executed query.
    fn from_uri(uri: Uri) -> Query {
        Query { uri, response: None }
    }
}
/// A query is "good" once it holds a response with a 2xx status.
fn query_good(q: &Query) -> bool {
    q.response
        .as_ref()
        .map(|resp| resp.status().is_success())
        .unwrap_or(false)
}
// HTTPS-capable hyper client, as configured in `batched_throttle`.
type HttpsClient = hyper::Client<HttpsConnector<client::HttpConnector>>;
// Boxed future resolving to a `Query` (hyper 0.11-era trait object).
type FutureQuery = Box<Future<Item = Query, Error = hyper::Error>>;
/// Lifts an already-resolved `Query` into the boxed-future type.
fn to_future(x: Query) -> FutureQuery {
    let ready = future::ok(x);
    Box::new(ready)
}
/// Issues the HTTP request for `query` unless it already carries a
/// successful response, in which case it is passed through untouched.
fn exec_if_needed(client: &HttpsClient, query: Query) -> FutureQuery {
    if query_good(&query) {
        return to_future(query);
    }
    println!("exec: {:?}", query);
    let Query { uri, .. } = query;
    let mut req = Request::new(Method::Get, uri.0.parse().unwrap());
    req.headers_mut().set(Accept::json());
    let fut = client
        .request(req)
        .inspect(|resp| println!("HTTP {}", resp.status()))
        .map(|resp| Query {
            uri,
            response: Some(resp),
        });
    Box::new(fut)
}
// Boxed future resolving to `T` (hyper 0.11-era trait object).
type BoxedFuture<T> = Box<Future<Item = T, Error = hyper::Error>>;

/// Runs every query in `queries` (already-successful ones are passed
/// through), preserving order, resolving once all have completed.
fn do_batch(client: &HttpsClient, queries: Vec<Query>) -> BoxedFuture<Vec<Query>> {
    println!("do_batch() {} queries", queries.len());
    let futures = queries.into_iter().map(|q| exec_if_needed(client, q));
    println!("do_batch() futures {:?}", futures);
    let all_done = stream::futures_ordered(futures).collect();
    Box::new(all_done)
}
/// Removes up to `suggested_n` leading elements from `right` and returns
/// them in order; takes fewer when `right` is shorter. `right` keeps the
/// remaining tail.
fn take<T>(right: &mut Vec<T>, suggested_n: usize) -> Vec<T> {
    // Idiomatic clamp instead of the manual if/else; expression instead of
    // a trailing `return`.
    let n = right.len().min(suggested_n);
    right.drain(0..n).collect()
}
type BoxedResponses = Box<Vec<Response<Body>>>;

/// Executes `uris` in rate-limited batches and returns the raw responses,
/// retrying failed queries until every one has a 2xx response.
///
/// NOTE(review): the `Core` and `Client` created here are dropped when this
/// function returns, while the returned `Response` bodies have not been read
/// yet. If polling those body streams later depends on the originating
/// client/core still being alive, that could explain bodies arriving
/// truncated — consider consuming the bodies on this core before returning.
fn batched_throttle(uris: Vec<Uri>) -> BoxedResponses {
    println!("batched_throttle({} uris)", uris.len());
    let mut core = Core::new().unwrap();
    let client = Client::configure()
        .connector(HttpsConnector::new(4, &core.handle()).unwrap())
        .build(&core.handle());
    let mut rate_limit_info = RateLimitInfo::default();
    // Queries not yet known to have succeeded.
    let mut queries_right: Vec<Query> = uris.into_iter().map(Query::from_uri).collect();
    loop {
        // Queries processed during this pass, successful or not.
        let mut queries_left: Vec<Query> = Vec::with_capacity(queries_right.len());
        println!("batched_throttle: starting inner loop");
        loop {
            // throttle program during testing
            thread::sleep(Duration::from_millis(800));
            println!("batched_throttle: {:?}", rate_limit_info);
            if let Some(retry_after) = rate_limit_info.retry_after {
                println!("batched_throttle: retrying after {:?}", retry_after);
                thread::sleep(retry_after)
            }
            if queries_right.is_empty() {
                break;
            }
            // Take as many queries as the rate limit allows — at least one,
            // so forward progress is always made.
            let mut queries_mid = {
                let ri_count = rate_limit_info.remaining_reqs;
                let iter_req_count = if ri_count == 0 { 1 } else { ri_count };
                println!("batched_throttle: iter_req_count {}", iter_req_count);
                take(&mut queries_right, iter_req_count as usize)
            };
            println!(
                "batched_throttle: \
                 queries_right.len() {}, \
                 queries_left.len() {}, \
                 queries_mid.len() {})",
                queries_right.len(),
                queries_left.len(),
                queries_mid.len()
            );
            if queries_mid.iter().all(query_good) {
                println!("batched_throttle: queries_mid.iter().all(query_good)");
                continue;
            }
            queries_mid = { core.run(do_batch(&client, queries_mid)).unwrap() };
            // Refresh the rate-limit state from the most recently dated
            // response in this batch.
            rate_limit_info = {
                let create_very_old_response =
                    || Response::new().with_header(DateHeader(UNIX_EPOCH.into()));
                let very_old_response = create_very_old_response();
                let last_resp = queries_mid
                    .iter()
                    .map(|q| match &q.response {
                        Some(r) => r,
                        _ => panic!("Impossible"),
                    })
                    .fold(&very_old_response, resp_dated_later);
                RateLimitInfo::from(&last_resp)
            };
            // FIX: the original wrote `&queries_left.append(&mut queries_mid);`,
            // taking a useless reference to the `()` result of `append`. The
            // call itself already does the work; the borrow was a no-op.
            queries_left.append(&mut queries_mid);
        }
        queries_right = queries_left;
        if queries_right.iter().all(query_good) {
            break;
        }
    }
    println!(
        "batched_throttle: finishing. queries_right.len() {}",
        queries_right.len()
    );
    Box::new(
        queries_right
            .into_iter()
            .map(|q| q.response.unwrap())
            .collect(),
    )
}
fn bucket_count_to_req_count(bucket_count: u32) -> u32 {
let needed_req_count = (bucket_count as f32 / MAX_REQ_SIZE as f32).ceil() as u32;
return needed_req_count;
}
type BoxedBuckets = Box<Vec<Bucket>>;

/// Collects the full response body, logs its size and raw JSON, then
/// deserializes it into buckets. Panics on malformed UTF-8 or JSON.
fn response_to_buckets(response: Response<Body>) -> BoxedFuture<Vec<Bucket>> {
    let parsed = response.body().concat2().map(|body: Chunk| -> Vec<Bucket> {
        println!("body.len(): {}", body.len());
        println!("JSON: {}", str::from_utf8(&body).unwrap());
        serde_json::from_slice(&body).unwrap()
    });
    Box::new(parsed)
}
/// Fetches the `bucket_count` most recent buckets for `symbol`.
/// Pages are requested newest-first (`reverse=true` in `make_uri`) and the
/// flattened result is reversed at the end, so the returned buckets appear
/// to be oldest-first — TODO confirm against `Bucket` ordering.
pub fn get_n_last(symbol: &str, bucket_count: u32) -> BoxedBuckets {
    let req_count = bucket_count_to_req_count(bucket_count);
    let uris = (0..req_count)
        .map(|page_ix| make_uri(symbol, page_ix))
        .collect();
    let responses = batched_throttle(uris);
    // NOTE(review): a *second* reactor `Core` is created here, while the
    // responses above were produced by a `Client` bound to the Core inside
    // `batched_throttle`, which has already been dropped by this point.
    // Reading the body streams on a different core, after the originating
    // client is gone, is a prime suspect for the truncated-body symptom —
    // verify by concatenating the bodies on the same core that issued the
    // requests.
    let mut core = Core::new().unwrap();
    let boxed_buckets = {
        // Bodies are concatenated and parsed in request order.
        let futures = responses.into_iter().map(response_to_buckets);
        let future = stream::futures_ordered(futures).collect();
        let groups_of_buckets = core.run(future).unwrap();
        Box::new(
            groups_of_buckets
                .into_iter()
                .flat_map(|bs| bs.into_iter())
                .rev()
                .collect(),
        )
    };
    return boxed_buckets;
}
extern板条箱系列;
外部板条箱serde_json;
使用未来:{future,stream,future,stream};
使用超链接;
使用hyper::{client,Body,Chunk,client,Headers,Method,Request,Response,header::Accept,
header::dateas DateHeader,header::RetryAfter};
使用hyper_tls::HttpsConnector;
使用tokio_堆芯::反应堆::堆芯;
使用型号::桶;
使用std::线程;
使用std::time::{Duration,UNIX_EPOCH};
使用std::str;
头球!{(XRateLimitRemaining,“x-ratelimit-remaining”)=>[String]}
#[导出(调试)]
结构Uri(发布字符串);
const MAX_REQ_SIZE:u32=500;
fn make_uri(符号:&str,第九页:u32)->uri{
Uri(格式(
"https://www.bitmex.com/api/v1/trade/bucketed?\
symbol={symbol}&\
列={columns}&\
部分=错误&\
反向=真&\
binSize={bin_size}&\
计数={count}&\
开始={start}“,
符号=符号,
columns=“close,timestamp”,
bin_size=“5m”,
计数=最大需求大小,
开始=0+最大需求大小*第九页
))
}
#[导出(调试)]
结构速率限制信息{
剩余需求:u32,
在:选项之后重试,
}
impl RateLimitInfo{
fn default()->RateLimitInfo{
费率限制信息{
剩余需求:1,
在以下情况后重试:无,
}
}
fn来源(响应:&Response)->RateLimitInfo{
让headers=resp.headers();
让剩余的需求=标题
.get::()
.unwrap|u或| else(| | panic!(“x-ratelimit-未按要求保留”))
.parse()
.unwrap();
让我们在=匹配头之后重试。获取::(){
一些(RetryAfter::Delay(duration))=>Some(*duration),
_=>没有,
};
费率限制信息{
剩余需求,
请稍后再试,
}
}
}
fn resp_dated_后获取日期(&b){
A.
}否则{
B
}
}
#[导出(调试)]
结构查询{
uri:uri,
答复:选择,
}
impl查询{
fn from_uri(uri:uri)->查询{
质疑{
uri:uri,
答复:无,
}
}
}
fn query\u good(q:&query)->bool{
匹配与q.response{
Some(response)=>response.status()是_success(),
_=>错误,
}
}
键入HttpsClient=hyper::Client;
输入FutureQuery=Box;
fn to_future(x:Query)->FutureQuery{
框::新(未来::确定(x))
}
fn exec\u如果需要(客户端:&HttpsClient,查询:query)->FutureQuery{
fn exec(客户端:&HttpsClient,q:Query)->FutureQuery{
println!(“exec:{:?}”,q);
设uri=q.uri;
let req={
让mut req=Request::new(方法::Get,uri.0.parse().unwrap());
req.headers_mut().set(Accept::json());
请求
};
盒子:新的(
客户
.请求(req)
.inspect(| resp | println!(“HTTP{}”,resp.status()))
.map(| resp |查询{
uri:uri,
答复:一些(分别),
}),
)
}
如果查询良好(&query){
到未来(查询)
}否则{
执行官(客户机、查询)
}
}
类型BoxedFuture=Box;
fn do_批处理(客户端:&HttpsClient,查询:Vec)->BoxedFuture{
println!(“do_batch(){}querys”,querys.len());
让exec_if|u needed=| q | exec_if|u needed(客户,q);
让futures=querys.into_iter().map(如果需要,执行);
println!(“do_batch()futures{:?}”,futures);
盒子:新的(
stream::futures\u ordered(futures).collect(),//future::join\u all(futures)
)
}
fn take(右:&mut Vec,建议使用)->Vec{
设n:usize=if right.len(){
对,len()
}否则{
建议
};
设左=右。排水管(0..n);
返回left.collect();
}
BoxedResponses类型=Box;
fn批量节流阀(URI:Vec)->盒式响应{
println!(({}uris