Merge pull request #74 from alicemaz/brianm-encoder

Streaming encoder
Marshall Pierce 2018-10-28 09:26:24 -06:00 committed by GitHub
commit 84ad275421
6 changed files with 763 additions and 3 deletions


@@ -3,7 +3,7 @@
- Remove line wrapping. Line wrapping was never a great conceptual fit in this library, and other features (streaming encoding, etc) either couldn't support it or could support only special cases of it with a great increase in complexity. Line wrapping has been pulled out into a [line-wrap](https://crates.io/crates/line-wrap) crate, so it's still available if you need it.
- `Base64Display` creation no longer uses a `Result` because it can't fail, which means its helper methods for common
configs that `unwrap()` for you are no longer needed
- Add a streaming encoder `Write` impl to transparently base64-encode data as you write it.
# 0.9.3
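
To illustrate the streaming encoder added above, here is a minimal sketch (not part of this diff): it streams bytes from any `Read` source into the new `base64::write::EncoderWriter` via `std::io::copy`, assuming only the items introduced elsewhere in this PR (`EncoderWriter`, `finish()`, `base64::STANDARD`).

```rust
extern crate base64;

use std::io;

fn main() -> io::Result<()> {
    // Any `Write` target works; a `Vec<u8>` keeps the sketch self-contained.
    let mut encoded = Vec::new();
    {
        let mut enc = base64::write::EncoderWriter::new(&mut encoded, base64::STANDARD);
        // Stream bytes from any `Read` source; `io::copy` handles the buffering.
        io::copy(&mut "hello world".as_bytes(), &mut enc)?;
        // Encode the trailing partial chunk and padding. `Drop` would also do this,
        // but calling `finish()` explicitly surfaces any I/O error.
        enc.finish()?;
    }
    assert_eq!(b"aGVsbG8gd29ybGQ=", &encoded[..]);
    Ok(())
}
```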


@@ -6,9 +6,10 @@ extern crate test;
use base64::display;
use base64::{decode, decode_config_buf, decode_config_slice, encode, encode_config_buf,
-encode_config_slice, Config, STANDARD};
+encode_config_slice, write, Config, STANDARD};
use rand::{Rng, FromEntropy};
use std::io::Write;
use test::Bencher;
#[bench]
@@ -86,6 +87,11 @@ fn encode_3kib_slice(b: &mut Bencher) {
do_encode_bench_slice(b, 3 * 1024, STANDARD)
}
#[bench]
fn encode_3kib_reuse_buf_stream(b: &mut Bencher) {
do_encode_bench_stream(b, 3 * 1024, STANDARD)
}
#[bench]
fn encode_3mib(b: &mut Bencher) {
do_encode_bench(b, 3 * 1024 * 1024)
@@ -325,6 +331,23 @@ fn do_encode_bench_slice(b: &mut Bencher, size: usize, config: Config) {
});
}
fn do_encode_bench_stream(b: &mut Bencher, size: usize, config: Config) {
let mut v: Vec<u8> = Vec::with_capacity(size);
fill(&mut v);
let mut buf = Vec::new();
b.bytes = v.len() as u64;
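// 2x the input size comfortably exceeds the ~4/3 base64 expansion, so the timed closure never reallocates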
buf.reserve(size * 2);
b.iter(|| {
buf.clear();
let mut stream_enc = write::EncoderWriter::new(&mut buf, config);
stream_enc.write_all(&v).unwrap();
stream_enc.flush().unwrap();
});
}
fn fill(v: &mut Vec<u8>) {
let cap = v.capacity();
// weak randomness is plenty; we just want to not be completely friendly to the branch predictor


@@ -64,6 +64,7 @@ extern crate byteorder;
mod chunked_encoder;
pub mod display;
pub mod write;
mod tables;
mod encode;
@@ -110,7 +111,6 @@ impl CharacterSet {
}
}
/// Contains configuration parameters for base64 encoding
#[derive(Clone, Copy, Debug)]
pub struct Config {

src/write/encoder.rs (new file, 252 lines)

@@ -0,0 +1,252 @@
use ::encode::encode_to_slice;
use std::{cmp, fmt};
use std::io::{Result, Write};
use {encode_config_slice, Config};
pub(crate) const BUF_SIZE: usize = 1024;
/// The most bytes whose encoding will fit in `BUF_SIZE`
const MAX_INPUT_LEN: usize = BUF_SIZE / 4 * 3;
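// e.g. with BUF_SIZE = 1024, MAX_INPUT_LEN = 1024 / 4 * 3 = 768 input bytes, whose encoding
// is exactly 768 / 3 * 4 = 1024 output bytes -- filling `output` with no remainder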
// 3 bytes of input = 4 bytes of base64, always (because we don't allow line wrapping)
const MIN_ENCODE_CHUNK_SIZE: usize = 3;
/// A `Write` implementation that base64 encodes data before delegating to the wrapped writer.
///
/// Because base64 has special handling for the end of the input data (padding, etc), there's a
/// `finish()` method on this type that encodes any leftover input bytes and adds padding if
/// appropriate. It's called automatically when the encoder is dropped (see the `Drop` implementation), but
/// any error that occurs when invoking the underlying writer will be suppressed. If you want to
/// handle such errors, call `finish()` yourself.
///
/// # Examples
///
/// ```
/// use std::io::Write;
///
/// // use a vec as the simplest possible `Write` -- in real code this is probably a file, etc.
/// let mut wrapped_writer = Vec::new();
/// {
/// let mut enc = base64::write::EncoderWriter::new(
/// &mut wrapped_writer, base64::STANDARD);
///
/// // handle errors as you normally would
/// enc.write_all(b"asdf").unwrap();
/// // could leave this out to be called by Drop, if you don't care
/// // about handling errors
/// enc.finish().unwrap();
///
/// }
///
/// // base64 was written to the writer
/// assert_eq!(b"YXNkZg==", &wrapped_writer[..]);
///
/// ```
///
/// # Panics
///
/// Calling `write()` after `finish()` is invalid and will panic.
///
/// # Errors
///
/// Base64 encoding itself does not generate errors, but errors from the wrapped writer will be
/// returned as per the contract of `Write`.
///
/// # Performance
///
/// Streaming encoding is slightly slower than encoding a whole slice in one call (on the order of a
/// couple percent). It does not do any heap allocation.
pub struct EncoderWriter<'a, W: 'a + Write> {
config: Config,
/// Where encoded data is written to
w: &'a mut W,
/// Holds a partial chunk, if any, after the last `write()`, so that we may then fill the chunk
/// with the next `write()`, encode it, then proceed with the rest of the input normally.
extra: [u8; MIN_ENCODE_CHUNK_SIZE],
/// How much of `extra` is occupied, in `[0, MIN_ENCODE_CHUNK_SIZE]`.
extra_len: usize,
/// Buffer to encode into.
output: [u8; BUF_SIZE],
/// True iff padding / partial last chunk has been written.
finished: bool,
/// Panic safety: don't write again in the destructor if the writer panicked while we were writing to it
panicked: bool
}
impl<'a, W: Write> fmt::Debug for EncoderWriter<'a, W> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"extra:{:?} extra_len:{:?} output[..5]: {:?}",
self.extra,
self.extra_len,
&self.output[0..5]
)
}
}
impl<'a, W: Write> EncoderWriter<'a, W> {
/// Create a new encoder around an existing writer.
pub fn new(w: &'a mut W, config: Config) -> EncoderWriter<'a, W> {
EncoderWriter {
config,
w,
extra: [0u8; MIN_ENCODE_CHUNK_SIZE],
extra_len: 0,
output: [0u8; BUF_SIZE],
finished: false,
panicked: false
}
}
/// Encode all remaining buffered data and write it, including any trailing incomplete input
/// triples and associated padding.
///
/// Once this succeeds, no further writes can be performed, as that would produce invalid
/// base64.
///
/// # Errors
///
/// Assuming the wrapped writer obeys the `Write` contract, if this returns `Err`, no data was
/// written, and `finish()` may be retried if appropriate for the type of error, etc.
pub fn finish(&mut self) -> Result<()> {
if self.finished {
return Ok(());
};
if self.extra_len > 0 {
let encoded_len = encode_config_slice(
&self.extra[..self.extra_len],
self.config,
&mut self.output[..],
);
self.panicked = true;
let _ = self.w.write(&self.output[..encoded_len])?;
self.panicked = false;
// write succeeded, do not write the encoding of extra again if finish() is retried
self.extra_len = 0;
}
self.finished = true;
Ok(())
}
}
impl<'a, W: Write> Write for EncoderWriter<'a, W> {
fn write(&mut self, input: &[u8]) -> Result<usize> {
if self.finished {
panic!("Cannot write more after calling finish()");
}
if input.len() == 0 {
return Ok(0);
}
// The contract of `Write::write` places some constraints on this implementation:
// - a call to `write()` represents at most one call to a wrapped `Write`, so we can't
// iterate over the input and encode multiple chunks.
// - Errors mean that "no bytes were written to this writer", so we need to reset the
// internal state to what it was before the error occurred
// how many bytes, if any, were read into `extra` to create a triple to encode
let mut extra_input_read_len = 0;
let mut input = input;
let orig_extra_len = self.extra_len;
let mut encoded_size = 0;
// always a multiple of MIN_ENCODE_CHUNK_SIZE
let mut max_input_len = MAX_INPUT_LEN;
// process leftover stuff from last write
if self.extra_len > 0 {
debug_assert!(self.extra_len < 3);
if input.len() + self.extra_len >= MIN_ENCODE_CHUNK_SIZE {
// Fill up `extra`, encode that into `output`, and consume as much of the rest of
// `input` as possible.
// We could write just the encoding of `extra` by itself but then we'd have to
// return after writing only 4 bytes, which is inefficient if the underlying writer
// would make a syscall.
extra_input_read_len = MIN_ENCODE_CHUNK_SIZE - self.extra_len;
debug_assert!(extra_input_read_len > 0);
// overwrite only bytes that weren't already used. If we need to rollback extra_len
// (when the subsequent write errors), the old leading bytes will still be there.
self.extra[self.extra_len..MIN_ENCODE_CHUNK_SIZE].copy_from_slice(&input[0..extra_input_read_len]);
let len = encode_to_slice(&self.extra[0..MIN_ENCODE_CHUNK_SIZE],
&mut self.output[..],
self.config.char_set.encode_table());
debug_assert_eq!(4, len);
input = &input[extra_input_read_len..];
// consider extra to be used up, since we encoded it
self.extra_len = 0;
// don't clobber where we just encoded to
encoded_size = 4;
// and don't read more than can be encoded
max_input_len = MAX_INPUT_LEN - MIN_ENCODE_CHUNK_SIZE;
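// (the 4 output bytes already written for the extra chunk plus (MAX_INPUT_LEN - 3) / 3 * 4 = 1020
// bytes for the remaining chunks is exactly BUF_SIZE, so `output` cannot overflow)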
// fall through to normal encoding
} else {
// `extra` and `input` are non-empty, but `|extra| + |input| < 3`, so there must be
// 1 byte in each.
debug_assert_eq!(1, input.len());
debug_assert_eq!(1, self.extra_len);
self.extra[self.extra_len] = input[0];
self.extra_len += 1;
return Ok(1);
};
} else if input.len() < MIN_ENCODE_CHUNK_SIZE {
// `extra` is empty, and `input` fits inside it
self.extra[0..input.len()].copy_from_slice(input);
self.extra_len = input.len();
return Ok(input.len());
};
// either 0 or 1 complete chunks encoded from extra
debug_assert!(encoded_size == 0 || encoded_size == 4);
debug_assert!(MAX_INPUT_LEN - max_input_len == 0
|| MAX_INPUT_LEN - max_input_len == MIN_ENCODE_CHUNK_SIZE);
// handle complete triples
let input_complete_chunks_len = input.len() - (input.len() % MIN_ENCODE_CHUNK_SIZE);
let input_chunks_to_encode_len = cmp::min(input_complete_chunks_len, max_input_len);
debug_assert_eq!(0, max_input_len % MIN_ENCODE_CHUNK_SIZE);
debug_assert_eq!(0, input_chunks_to_encode_len % MIN_ENCODE_CHUNK_SIZE);
encoded_size += encode_to_slice(
&input[..(input_chunks_to_encode_len)],
&mut self.output[encoded_size..],
self.config.char_set.encode_table(),
);
self.panicked = true;
let r = self.w.write(&self.output[..encoded_size]);
self.panicked = false;
match r {
Ok(_) => return Ok(extra_input_read_len + input_chunks_to_encode_len),
Err(_) => {
// in case we filled and encoded `extra`, reset extra_len
self.extra_len = orig_extra_len;
return r;
}
}
// we could hypothetically copy a few more bytes into `extra` but the extra 1-2 bytes
// are not worth all the complexity (and branches)
}
/// Because `flush()` is usually expected to be safe to call multiple times, it will *not* write out
/// any incomplete input chunk or padding; it only flushes the wrapped writer. Use `finish()` for that.
fn flush(&mut self) -> Result<()> {
self.w.flush()
}
}
impl<'a, W: Write> Drop for EncoderWriter<'a, W> {
fn drop(&mut self) {
if !self.panicked {
// like `BufWriter`, ignore errors during drop
let _ = self.finish();
}
}
}
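
The `finish()` docs above guarantee that an `Err` means nothing was written, so the call may be retried. A minimal sketch of leaning on that contract (not part of the PR; `encode_all` is a hypothetical helper and assumes only the `EncoderWriter` API shown in this file):

```rust
use std::io::{self, Write};

// Hypothetical helper (not in the PR): base64-encode `data` into `writer`,
// retrying `finish()` on `ErrorKind::Interrupted` as its documentation allows.
fn encode_all<W: Write>(writer: &mut W, data: &[u8]) -> io::Result<()> {
    let mut enc = base64::write::EncoderWriter::new(writer, base64::STANDARD);
    // `write_all` already retries `Interrupted` internally.
    enc.write_all(data)?;
    loop {
        match enc.finish() {
            Ok(()) => return Ok(()),
            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
}
```

The tests below exercise the same retry behavior with a writer that randomly injects `Interrupted` errors.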

src/write/encoder_tests.rs (new file, 479 lines)

@@ -0,0 +1,479 @@
extern crate rand;
use super::EncoderWriter;
use tests::random_config;
use {encode_config, encode_config_buf, URL_SAFE, STANDARD_NO_PAD};
use std::io::{Cursor, Write};
use std::{cmp, str, io};
use self::rand::Rng;
#[test]
fn encode_three_bytes() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
let sz = enc.write(b"abc").unwrap();
assert_eq!(sz, 3);
}
assert_eq!(&c.get_ref()[..], encode_config("abc", URL_SAFE).as_bytes());
}
#[test]
fn encode_nine_bytes_two_writes() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
let sz = enc.write(b"abcdef").unwrap();
assert_eq!(sz, 6);
let sz = enc.write(b"ghi").unwrap();
assert_eq!(sz, 3);
}
assert_eq!(
&c.get_ref()[..],
encode_config("abcdefghi", URL_SAFE).as_bytes()
);
}
#[test]
fn encode_one_then_two_bytes() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
let sz = enc.write(b"a").unwrap();
assert_eq!(sz, 1);
let sz = enc.write(b"bc").unwrap();
assert_eq!(sz, 2);
}
assert_eq!(&c.get_ref()[..], encode_config("abc", URL_SAFE).as_bytes());
}
#[test]
fn encode_one_then_five_bytes() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
let sz = enc.write(b"a").unwrap();
assert_eq!(sz, 1);
let sz = enc.write(b"bcdef").unwrap();
assert_eq!(sz, 5);
}
assert_eq!(
&c.get_ref()[..],
encode_config("abcdef", URL_SAFE).as_bytes()
);
}
#[test]
fn encode_1_2_3_bytes() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
let sz = enc.write(b"a").unwrap();
assert_eq!(sz, 1);
let sz = enc.write(b"bc").unwrap();
assert_eq!(sz, 2);
let sz = enc.write(b"def").unwrap();
assert_eq!(sz, 3);
}
assert_eq!(
&c.get_ref()[..],
encode_config("abcdef", URL_SAFE).as_bytes()
);
}
#[test]
fn encode_with_padding() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
enc.write_all(b"abcd").unwrap();
enc.flush().unwrap();
}
assert_eq!(&c.get_ref()[..], encode_config("abcd", URL_SAFE).as_bytes());
}
#[test]
fn encode_with_padding_multiple_writes() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
assert_eq!(1, enc.write(b"a").unwrap());
assert_eq!(2, enc.write(b"bc").unwrap());
assert_eq!(3, enc.write(b"def").unwrap());
assert_eq!(1, enc.write(b"g").unwrap());
enc.flush().unwrap();
}
assert_eq!(
&c.get_ref()[..],
encode_config("abcdefg", URL_SAFE).as_bytes()
);
}
#[test]
fn finish_writes_extra_byte() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
assert_eq!(6, enc.write(b"abcdef").unwrap());
// will be in extra
assert_eq!(1, enc.write(b"g").unwrap());
// 1 trailing byte = 2 encoded chars
let _ = enc.finish().unwrap();
}
assert_eq!(
&c.get_ref()[..],
encode_config("abcdefg", URL_SAFE).as_bytes()
);
}
#[test]
fn write_partial_chunk_encodes_partial_chunk() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
// nothing encoded yet
assert_eq!(2, enc.write(b"ab").unwrap());
// encoded here
let _ = enc.finish().unwrap();
}
assert_eq!(
&c.get_ref()[..],
encode_config("ab", STANDARD_NO_PAD).as_bytes()
);
assert_eq!(3, c.get_ref().len());
}
#[test]
fn write_1_chunk_encodes_complete_chunk() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
assert_eq!(3, enc.write(b"abc").unwrap());
let _ = enc.finish().unwrap();
}
assert_eq!(
&c.get_ref()[..],
encode_config("abc", STANDARD_NO_PAD).as_bytes()
);
assert_eq!(4, c.get_ref().len());
}
#[test]
fn write_1_chunk_and_partial_encodes_only_complete_chunk() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
// "d" not written
assert_eq!(3, enc.write(b"abcd").unwrap());
let _ = enc.finish().unwrap();
}
assert_eq!(
&c.get_ref()[..],
encode_config("abc", STANDARD_NO_PAD).as_bytes()
);
assert_eq!(4, c.get_ref().len());
}
#[test]
fn write_2_partials_to_exactly_complete_chunk_encodes_complete_chunk() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
assert_eq!(1, enc.write(b"a").unwrap());
assert_eq!(2, enc.write(b"bc").unwrap());
let _ = enc.finish().unwrap();
}
assert_eq!(
&c.get_ref()[..],
encode_config("abc", STANDARD_NO_PAD).as_bytes()
);
assert_eq!(4, c.get_ref().len());
}
#[test]
fn write_partial_then_enough_to_complete_chunk_but_not_complete_another_chunk_encodes_complete_chunk_without_consuming_remaining() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
assert_eq!(1, enc.write(b"a").unwrap());
// doesn't consume "d"
assert_eq!(2, enc.write(b"bcd").unwrap());
let _ = enc.finish().unwrap();
}
assert_eq!(
&c.get_ref()[..],
encode_config("abc", STANDARD_NO_PAD).as_bytes()
);
assert_eq!(4, c.get_ref().len());
}
#[test]
fn write_partial_then_enough_to_complete_chunk_and_another_chunk_encodes_complete_chunks() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
assert_eq!(1, enc.write(b"a").unwrap());
// completes partial chunk, and another chunk
assert_eq!(5, enc.write(b"bcdef").unwrap());
let _ = enc.finish().unwrap();
}
assert_eq!(
&c.get_ref()[..],
encode_config("abcdef", STANDARD_NO_PAD).as_bytes()
);
assert_eq!(8, c.get_ref().len());
}
#[test]
fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_partial_chunk_encodes_only_complete_chunks() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
assert_eq!(1, enc.write(b"a").unwrap());
// completes partial chunk, and another chunk, with one more partial chunk that's not
// consumed
assert_eq!(5, enc.write(b"bcdefe").unwrap());
let _ = enc.finish().unwrap();
}
assert_eq!(
&c.get_ref()[..],
encode_config("abcdef", STANDARD_NO_PAD).as_bytes()
);
assert_eq!(8, c.get_ref().len());
}
#[test]
fn drop_calls_finish_for_you() {
let mut c = Cursor::new(Vec::new());
{
let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
assert_eq!(1, enc.write(b"a").unwrap());
}
assert_eq!(
&c.get_ref()[..],
encode_config("a", STANDARD_NO_PAD).as_bytes()
);
assert_eq!(2, c.get_ref().len());
}
#[test]
fn every_possible_split_of_input() {
let mut rng = rand::thread_rng();
let mut orig_data = Vec::<u8>::new();
let mut stream_encoded = Vec::<u8>::new();
let mut normal_encoded = String::new();
let size = 5_000;
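// `size` doubles as the iteration count so every split point in 0..size is exercised below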
for i in 0..size {
orig_data.clear();
stream_encoded.clear();
normal_encoded.clear();
for _ in 0..size {
orig_data.push(rng.gen());
}
let config = random_config(&mut rng);
encode_config_buf(&orig_data, config, &mut normal_encoded);
{
let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, config);
// Write the first i bytes, then the rest
stream_encoder.write_all(&orig_data[0..i]).unwrap();
stream_encoder.write_all(&orig_data[i..]).unwrap();
}
assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
}
}
#[test]
fn encode_random_config_matches_normal_encode_reasonable_input_len() {
// choose up to 2 * buf size, so ~half the time it'll use a full buffer
do_encode_random_config_matches_normal_encode(super::encoder::BUF_SIZE * 2)
}
#[test]
fn encode_random_config_matches_normal_encode_tiny_input_len() {
do_encode_random_config_matches_normal_encode(10)
}
#[test]
fn retrying_writes_that_error_with_interrupted_works() {
let mut rng = rand::thread_rng();
let mut orig_data = Vec::<u8>::new();
let mut stream_encoded = Vec::<u8>::new();
let mut normal_encoded = String::new();
for _ in 0..1_000 {
orig_data.clear();
stream_encoded.clear();
normal_encoded.clear();
let orig_len: usize = rng.gen_range(100, 20_000);
for _ in 0..orig_len {
orig_data.push(rng.gen());
}
// encode the normal way
let config = random_config(&mut rng);
encode_config_buf(&orig_data, config, &mut normal_encoded);
// encode via the stream encoder
{
let mut interrupt_rng = rand::thread_rng();
let mut interrupting_writer = InterruptingWriter {
w: &mut stream_encoded,
rng: &mut interrupt_rng,
fraction: 0.8,
};
let mut stream_encoder = EncoderWriter::new(&mut interrupting_writer, config);
let mut bytes_consumed = 0;
while bytes_consumed < orig_len {
// use short inputs since we want to use `extra` a lot as that's what needs rollback
// when errors occur
let input_len: usize = cmp::min(rng.gen_range(0, 10),
orig_len - bytes_consumed);
// write a little bit of the data
retry_interrupted_write_all(&mut stream_encoder,
&orig_data[bytes_consumed..bytes_consumed + input_len])
.unwrap();
bytes_consumed += input_len;
}
loop {
let res = stream_encoder.finish();
match res {
Ok(_) => break,
Err(e) => match e.kind() {
io::ErrorKind::Interrupted => continue,
_ => Err(e).unwrap() // bail
}
}
}
assert_eq!(orig_len, bytes_consumed);
}
assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
}
}
/// Retry writes until all the data is written or an error that isn't Interrupted is returned.
fn retry_interrupted_write_all<W: Write>(w: &mut W, buf: &[u8]) -> io::Result<()> {
let mut written = 0;
while written < buf.len() {
let res = w.write(&buf[written..]);
match res {
Ok(len) => written += len,
Err(e) => match e.kind() {
io::ErrorKind::Interrupted => continue,
_ => {
println!("got kind: {:?}", e.kind());
return Err(e);
}
}
}
}
Ok(())
}
fn do_encode_random_config_matches_normal_encode(max_input_len: usize) {
let mut rng = rand::thread_rng();
let mut orig_data = Vec::<u8>::new();
let mut stream_encoded = Vec::<u8>::new();
let mut normal_encoded = String::new();
for _ in 0..1_000 {
orig_data.clear();
stream_encoded.clear();
normal_encoded.clear();
let orig_len: usize = rng.gen_range(100, 20_000);
for _ in 0..orig_len {
orig_data.push(rng.gen());
}
// encode the normal way
let config = random_config(&mut rng);
encode_config_buf(&orig_data, config, &mut normal_encoded);
// encode via the stream encoder
{
let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, config);
let mut bytes_consumed = 0;
while bytes_consumed < orig_len {
let input_len: usize = cmp::min(rng.gen_range(0, max_input_len),
orig_len - bytes_consumed);
// write a little bit of the data
stream_encoder
.write_all(&orig_data[bytes_consumed..bytes_consumed + input_len])
.unwrap();
bytes_consumed += input_len;
}
stream_encoder.finish().unwrap();
assert_eq!(orig_len, bytes_consumed);
}
assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
}
}
/// A `Write` implementation that returns Interrupted some fraction of the time, randomly.
struct InterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> {
w: &'a mut W,
rng: &'a mut R,
/// In [0, 1]. If a random number in [0, 1] is `<= fraction`, `Write` methods will return
/// an `Interrupted` error.
fraction: f64,
}
impl<'a, W: Write, R: Rng> Write for InterruptingWriter<'a, W, R> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.rng.gen_range(0.0, 1.0) <= self.fraction {
return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
}
self.w.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
if self.rng.gen_range(0.0, 1.0) <= self.fraction {
return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
}
self.w.flush()
}
}

src/write/mod.rs (new file, 6 lines)

@@ -0,0 +1,6 @@
//! Implementations of `io::Write` to transparently handle base64.
mod encoder;
pub use self::encoder::EncoderWriter;
#[cfg(test)]
mod encoder_tests;