perf: refactor bench

This commit is contained in:
Vincent Herlemont
2023-10-29 09:45:00 +01:00
parent f806be1706
commit 7b2d59ef1b
6 changed files with 10 additions and 153 deletions

View File

@@ -37,13 +37,5 @@ default = ["serde", "bincode_1_3"]
name = "overhead"
harness = false
[[bench]]
name = "overhead_on_bincode"
harness = false
[[bench]]
name = "prepend_bytes"
harness = false
[build-dependencies]
skeptic = "0.13"

View File

@@ -1,16 +1,8 @@
use bincode::{Decode, Encode};
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use native_model_macro::native_model;
fn native_model_encode_body<T: Encode>(obj: &T) -> Result<Vec<u8>, bincode::error::EncodeError> {
bincode::encode_to_vec(obj, bincode::config::standard())
}
fn native_model_decode_body<T: Decode>(data: Vec<u8>) -> Result<T, bincode::error::DecodeError> {
bincode::decode_from_slice(&data, bincode::config::standard()).map(|(result, _)| result)
}
#[derive(Encode, Decode)]
use serde::{Deserialize, Serialize};
use native_model::Model;
#[derive(Serialize, Deserialize)]
#[native_model(id = 1, version = 1)]
struct Data(Vec<u8>);
@@ -31,7 +23,7 @@ fn criterion_benchmark(c: &mut Criterion) {
// encode
let data = Data(vec![1; nb_bytes]);
let mut encode_body = native_model_encode_body(&data).unwrap();
let mut encode_body = data.native_model_encode_body().unwrap();
group.bench_function(BenchmarkId::new("encode", nb_bytes), |b| {
b.iter(|| wrap(&mut encode_body))
});

View File

@@ -1,88 +0,0 @@
use bincode::{Decode, Encode};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use native_model_macro::native_model;
/// Payload used by the plain-bincode benchmarks; mirrors `DataForNativeModel`
/// below but without the `#[native_model]` attribute, so the two code paths
/// can be compared on identical data.
#[derive(Encode, Decode)]
struct DataForBincode {
    x: i32,
    string: String,
}
// Helpers: encode/decode a single value with plain bincode.
/// Serialize `obj` to a byte vector using bincode's standard configuration.
fn native_model_encode_body<T: Encode>(obj: &T) -> Result<Vec<u8>, bincode::error::EncodeError> {
    let config = bincode::config::standard();
    bincode::encode_to_vec(obj, config)
}
/// Deserialize a `T` from `data` using bincode's standard configuration,
/// discarding the byte count that `decode_from_slice` also returns.
fn native_model_decode_body<T: Decode>(data: Vec<u8>) -> Result<T, bincode::error::DecodeError> {
    let config = bincode::config::standard();
    let (value, _bytes_read) = bincode::decode_from_slice(&data, config)?;
    Ok(value)
}
/// Benchmark helper: encode one `DataForBincode` value, panicking on failure.
fn encode_with_bincode(data: &DataForBincode) -> Vec<u8> {
    let encoded = native_model_encode_body(data);
    encoded.unwrap()
}
/// Benchmark helper: decode a `DataForBincode` from raw bytes, panicking on failure.
fn decode_with_bincode(data: Vec<u8>) -> DataForBincode {
    let decoded: Result<DataForBincode, _> = native_model_decode_body(data);
    decoded.unwrap()
}
/// Round-trip helper: encode then immediately decode one value with plain bincode.
fn encode_decode_with_bincode(data: &DataForBincode) -> DataForBincode {
    let bytes = encode_with_bincode(data);
    decode_with_bincode(bytes)
}
/// Same field layout as `DataForBincode`, but registered with native_model
/// (id 1, version 1) so the cost of the wrapper can be measured against
/// the raw bincode path.
#[derive(Encode, Decode)]
#[native_model(id = 1, version = 1)]
struct DataForNativeModel {
    x: i32,
    string: String,
}
/// Benchmark helper: encode one value through the native_model wrapper.
fn encode_with_native_model(data: &DataForNativeModel) -> Vec<u8> {
    let encoded = native_model::encode(data);
    encoded.unwrap()
}
/// Benchmark helper: decode a native_model-wrapped payload, dropping the
/// second tuple element (the source version) that `decode` also returns.
fn decode_with_native_model(data: Vec<u8>) -> DataForNativeModel {
    native_model::decode::<DataForNativeModel>(data).unwrap().0
}
/// Round-trip helper: encode then immediately decode one value via native_model.
fn encode_decode_with_native_model(data: &DataForNativeModel) -> DataForNativeModel {
    let bytes = encode_with_native_model(data);
    decode_with_native_model(bytes)
}
fn criterion_benchmark(c: &mut Criterion) {
// Bincode
let data = DataForBincode {
x: 1,
// Set a very long string
string: "Hello".repeat(10000),
};
c.bench_function("encode_with_bincode", |b| {
b.iter(|| encode_with_bincode(black_box(&data)))
});
let encoded_data = encode_with_bincode(&data);
c.bench_function("decode_with_bincode", |b| {
b.iter(|| decode_with_bincode(black_box(encoded_data.clone())))
});
c.bench_function("encode_decode_with_bincode", |b| {
b.iter(|| encode_decode_with_bincode(black_box(&data)))
});
// Native model
let data = DataForNativeModel {
x: 1,
string: "Hello".repeat(10000),
};
c.bench_function("encode_with_native_model", |b| {
b.iter(|| encode_with_native_model(black_box(&data)))
});
let encoded_data = native_model::encode(&data).unwrap();
c.bench_function("decode_with_native_model", |b| {
b.iter(|| decode_with_native_model(black_box(encoded_data.clone())))
});
c.bench_function("encode_decode_with_native_model", |b| {
b.iter(|| encode_decode_with_native_model(black_box(&data)))
});
}
// Register the benchmark with criterion's standard harness entry point.
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

View File

@@ -1,28 +0,0 @@
/// Explores the fastest way to prepend bytes at the beginning of a `Vec<u8>` with constant overhead.
use bincode::{Decode, Encode};
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
/// Measure the cost of prepending a 4-byte header to payloads ranging from
/// 1 B to 100 MB, reporting throughput in bytes.
fn criterion_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("encode");
    // 1 byte, 1KB, 1MB, 10MB, 100MB
    for nb_bytes in [1, 1024, 1024 * 1024, 10 * 1024 * 1024, 100 * 1024 * 1024].into_iter() {
        group.throughput(criterion::Throughput::Bytes(nb_bytes as u64));
        let header: Vec<u8> = vec![0; 4];
        let data: Vec<u8> = vec![1; nb_bytes];
        group.bench_function(BenchmarkId::new("prepend_bytes", nb_bytes), |b| {
            // BUG FIX: the original called `header.append(&mut data)` on the
            // shared `data` vector. `Vec::append` drains its argument, so
            // every iteration after the first appended an *empty* Vec and the
            // benchmark measured nothing. `iter_batched` rebuilds both
            // buffers in untimed setup, so each timed run really moves
            // `nb_bytes` bytes.
            b.iter_batched(
                || (header.clone(), data.clone()),
                |(mut header, mut data)| {
                    // Fastest way found to prepend bytes: append the payload
                    // onto a fresh copy of the header.
                    header.append(&mut data);
                    header
                },
                criterion::BatchSize::LargeInput,
            )
        });
    }
    group.finish();
}
// Register the benchmark with criterion's standard harness entry point.
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

View File

@@ -51,4 +51,9 @@ test_bincode_2_rc:
test_postcard_1_0:
@just _tests_crate '--features postcard_1_0'
test_all: test_no_default test_default test_bincode_1_3 test_bincode_2_rc test_postcard_1_0
test_all: test_no_default test_default test_bincode_1_3 test_bincode_2_rc test_postcard_1_0
bench_overhead:
cargo bench --bench overhead
bench_all: bench_overhead

View File

@@ -1,22 +1,6 @@
use bincode;
use serde::{Deserialize, Serialize};
// fn native_model_encode_body<T: Serialize>(
// model: &T,
// ) -> Result<Vec<u8>, bincode::error::EncodeError> {
// {
// bincode::serde::encode_to_vec(model, bincode::config::standard())
// }
// }
//
// fn native_model_decode_body<T: for<'a> Deserialize<'a>>(
// data: Vec<u8>,
// ) -> Result<T, bincode::error::DecodeError> {
// {
// Ok(bincode::serde::decode_from_slice(&data, bincode::config::standard())?.0)
// }
// }
pub struct Bincode;
impl<T: Serialize> native_model::Encode<T> for Bincode {