Re-enable threading, this time without rustrt

Modified to remove dependencies on the runtime. Stack overflow error
reporting is gone; the process is simply killed on stack overflow.
Mike Robinson 2014-10-30 20:12:29 +00:00
parent f56e9cc659
commit e80212f6e8
9 changed files with 2695 additions and 25 deletions

View File

@ -1,7 +1,13 @@
The MIT License (MIT)
rust-libretro, excluding src/rust_wrapper/rustrt_files:
Copyright (c) 2014 Mike Robinson
Files in src/rust_wrapper/rustrt_files:
Copyright 2013-2014 The Rust Project Developers
See src/rust_wrapper/rustrt_files/COPYRIGHT
and src/rust_wrapper/rustrt_files/AUTHORS.txt
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
@ -19,3 +25,5 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -26,11 +26,17 @@
#![feature(globs)]
#![feature(macro_rules)]
#![feature(lang_items)]
#![feature(unsafe_destructor)]
#![feature(linkage)]
#![feature(phase)]
#![feature(asm)]
#![no_std]
extern crate libc;
extern crate rlibc;
#[phase(plugin, link)]
extern crate core;
extern crate alloc;
use core::intrinsics::transmute;
use core::intrinsics::abort;
@ -39,16 +45,22 @@ use core::prelude::*;
use rust_wrapper::*;
pub mod rust_wrapper;
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "panic_fmt"]
mod std { pub use core::fmt; }
#[lang = "stack_exhausted"]
extern fn stack_exhausted()
{
unsafe {abort();}
}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "panic_fmt"]
#[allow(unused_variables)]
extern fn panic_fmt(args: &core::fmt::Arguments,
file: &str,
line: uint) -> !
{
unsafe {abort();}
}
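
Pieced together, the hunk above leaves lib.rs handling every unrecoverable condition the same way: abort the process. A sketch of the final state, reassembled from the lines above rather than copied verbatim from the file; the `mod std` re-export is there because, in 2014-era `#![no_std]` Rust, `format_args!` expanded to paths under `::std::fmt`:

// Sketch: runtime-less error handling after this commit.
mod std { pub use core::fmt; } // format_args! expects ::std::fmt here

#[lang = "stack_exhausted"]
extern fn stack_exhausted()
{
    // No runtime left to report the overflow; kill the process.
    unsafe { abort(); }
}

#[lang = "eh_personality"] extern fn eh_personality() {}

#[lang = "panic_fmt"]
#[allow(unused_variables)]
extern fn panic_fmt(args: &core::fmt::Arguments,
                    file: &str,
                    line: uint) -> !
{
    unsafe { abort(); }
}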

View File

@ -8,10 +8,17 @@ use libc::malloc;
use libc::free;
use core::mem::size_of;
use core::mem::uninitialized;
use rust_wrapper::libretro::*;
use core::str::raw::c_str_to_static_slice;
use core::prelude::*;
use core::atomic::{AtomicBool, SeqCst, INIT_ATOMIC_BOOL};
use rust_wrapper::mutex::*;
use rust_wrapper::libretro::*;
pub mod libretro;
#[path = "rustrt_files/mutex.rs"] pub mod mutex;
#[path = "rustrt_files/thread.rs"] pub mod thread;
#[path = "rustrt_files/stack.rs"] pub mod stack;
#[path = "rustrt_files/stack_overflow.rs"] pub mod stack_overflow;
@ -451,7 +458,7 @@ pub extern fn retro_run()
{
use super::{AV_SCREEN_WIDTH, AV_SCREEN_HEIGHT, COLOR_DEPTH_32};
// unsafe {VIDEO_LOCK.lock_noguard();}
unsafe {VIDEO_LOCK.lock_noguard();}
// For now, poll input hardware only once per displayed frame
// (InputState::poll uses cached values)
@ -465,17 +472,17 @@ pub extern fn retro_run()
super::snapshot_video();
super::render_video();
// unsafe {VIDEO_LOCK.unlock_noguard();}
// unsafe {
// let guard = VIDEO_WAIT.lock();
// guard.signal();
// }
unsafe {VIDEO_LOCK.unlock_noguard();}
unsafe {
let guard = VIDEO_WAIT.lock();
guard.signal();
}
}
super::core_run();
}
//unsafe {VIDEO_LOCK.lock_noguard();}
unsafe {VIDEO_LOCK.lock_noguard();}
unsafe {
retro_video_refresh_cb.unwrap()(frame_buf as *const c_void,
AV_SCREEN_WIDTH,
@ -483,7 +490,7 @@ pub extern fn retro_run()
(AV_SCREEN_WIDTH *
if COLOR_DEPTH_32 {4} else {2}) as size_t);
}
//unsafe {VIDEO_LOCK.unlock_noguard();}
unsafe {VIDEO_LOCK.unlock_noguard();}
}
pub static mut frame_buf: *mut c_void = 0i as *mut c_void;
@ -499,17 +506,16 @@ pub unsafe extern "C" fn retro_init()
else {size_of::<u16>()} as u64);
// start video thread
//Thread::spawn(video_thread);
thread::Thread::spawn(video_thread);
}
/*
static VIDEO_SHUTDOWN: AtomicBool = INIT_ATOMIC_BOOL;
static VIDEO_LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
static VIDEO_WAIT: StaticNativeMutex = NATIVE_MUTEX_INIT;
fn video_thread()
{
println!("Video thread starts");
loop
{
unsafe {
@ -522,17 +528,17 @@ fn video_thread()
unsafe {VIDEO_LOCK.unlock_noguard();}
}
}
*/
#[no_mangle]
pub unsafe extern "C" fn retro_deinit()
{
// VIDEO_SHUTDOWN.store(true, SeqCst);
// {
// let guard = VIDEO_WAIT.lock();
// guard.signal();
// }
// VIDEO_LOCK.destroy();
VIDEO_SHUTDOWN.store(true, SeqCst);
{
let guard = VIDEO_WAIT.lock();
guard.signal();
}
VIDEO_LOCK.destroy();
if frame_buf != 0u8 as *mut c_void
{ free(frame_buf); }
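
Taken together, the re-enabled code in this file splits work across two threads: retro_run snapshots a frame under VIDEO_LOCK and signals VIDEO_WAIT, video_thread wakes to present it, and retro_deinit sets VIDEO_SHUTDOWN and signals one last time so the thread can exit. The middle of video_thread is not visible between the hunks above, so the loop below is a sketch of the handshake, with the exact ordering assumed rather than quoted:

fn video_thread()
{
    loop
    {
        unsafe {
            // Sleep until retro_run signals that a frame is ready
            // (assumed: the wait sits at the top of the loop).
            {
                let guard = VIDEO_WAIT.lock();
                guard.wait();
            }
            // retro_deinit signals after setting the shutdown flag.
            if VIDEO_SHUTDOWN.load(SeqCst) { return; }
            VIDEO_LOCK.lock_noguard();
            // ... present the snapshotted frame ...
            VIDEO_LOCK.unlock_noguard();
        }
    }
}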

View File

@ -0,0 +1,673 @@
Rust was written by these fine people:
Aaron Laursen <aaronlaursen@gmail.com>
Aaron Raimist <aaron@aaronraimist.com>
Aaron Todd <github@opprobrio.us>
Aaron Turon <aturon@mozilla.com>
Adam Bozanich <adam.boz@gmail.com>
Adolfo Ochagavía <aochagavia92@gmail.com>
Adrien Brault <adrien.brault@gmail.com>
Adrien Tétar <adri-from-59@hotmail.fr>
Ahmed Charles <ahmedcharles@gmail.com>
Alan Andrade <alan.andradec@gmail.com>
Alan Williams <mralert@gmail.com>
Aleksander Balicki <balicki.aleksander@gmail.com>
Alex Crichton <alex@alexcrichton.com>
Alex Gaynor <alex.gaynor@gmail.com>
Alex Lyon <arcterus@mail.com>
Alex Rønne Petersen <alex@lycus.org>
Alex Whitney <aw1209@ic.ac.uk>
Alexander Light <scialexlight@gmail.com>
Alexander Stavonin <a.stavonin@gmail.com>
Alexandre Gagnon <alxgnon@gmail.com>
Alexandros Tasos <sdi1100085@di.uoa.gr>
Alexei Sholik <alcosholik@gmail.com>
Alexis Beingessner <a.beingessner@gmail.com>
Alfie John <alfiej@fastmail.fm>
Ali Smesseim <smesseim.ali@gmail.com>
Alisdair Owens <awo101@zepler.net>
Aljaž "g5pw" Srebrnič <a2piratesoft@gmail.com>
Amy Unger <amy.e.unger@gmail.com>
Anders Kaseorg <andersk@mit.edu>
Andre Arko <andre@arko.net>
Andreas Gal <gal@mozilla.com>
Andreas Martens <andreasm@fastmail.fm>
Andreas Neuhaus <zargony@zargony.com>
Andreas Ots <andreasots@gmail.com>
Andreas Tolfsen <ato@mozilla.com>
Andrei Formiga <archimedes_siracusa@hotmail.com>
Andrew Chin <achin@eminence32.net>
Andrew Dunham <andrew@du.nham.ca>
Andrew Gallant <jamslam@gmail.com>
Andrew Paseltiner <apaseltiner@gmail.com>
Andrew Poelstra <asp11@sfu.ca>
Angus Lees <gus@inodes.org>
Anthony Juckel <ajuckel@gmail.com>
Anton Lofgren <alofgren@op5.com>
Anton Löfgren <anton.lofgren@gmail.com>
Arcterus <Arcterus@mail.com>
Ariel Ben-Yehuda <arielb1@mail.tau.ac.il>
Arjan Topolovec <arjan.top@gmail.com>
Arkaitz Jimenez <arkaitzj@gmail.com>
Armin Ronacher <armin.ronacher@active-4.com>
Arpad Borsos <arpad.borsos@googlemail.com>
Ashok Gautham <ScriptDevil@gmail.com>
Austin Bonander <austin.bonander@gmail.com>
Austin King <shout@ozten.com>
Austin Seipp <mad.one@gmail.com>
Axel Viala <axel.viala@darnuria.eu>
Aydin Kim <ladinjin@hanmail.net>
Ben Alpert <ben@benalpert.com>
Ben Blum <bblum@andrew.cmu.edu>
Ben Gamari <bgamari.foss@gmail.com>
Ben Harris <mail@bharr.is>
Ben Kelly <ben@wanderview.com>
Ben Noordhuis <info@bnoordhuis.nl>
Ben Striegel <ben.striegel@gmail.com>
Benjamin Adamson <adamson.benjamin@gmail.com>
Benjamin Herr <ben@0x539.de>
Benjamin Jackman <ben@jackman.biz>
Benjamin Kircher <benjamin.kircher@gmail.com>
Benjamin Peterson <benjamin@python.org>
Bheesham Persaud <bheesham.persaud@live.ca>
Bilal Husain <bilal@bilalhusain.com>
Bill Fallon <bill.fallon@robos.li>
Bill Myers <bill_myers@outlook.com>
Bill Wendling <wendling@apple.com>
Birunthan Mohanathas <birunthan@mohanathas.com>
Björn Steinbrink <bsteinbr@gmail.com>
Boris Egorov <jightuse@gmail.com>
Bouke van der Bijl <boukevanderbijl@gmail.com>
Brandon Sanderson <singingboyo@hotmail.com>
Brandon Waskiewicz <brandon.waskiewicz@gmail.com>
Branimir <branimir@volomp.com>
Brendan Cully <brendan@kublai.com>
Brendan Eich <brendan@mozilla.org>
Brendan McLoughlin <btmcloughlin@gmail.com>
Brendan Zabarauskas <bjzaba@yahoo.com.au>
Brett Cannon <brett@python.org>
Brian Anderson <banderson@mozilla.com>
Brian Dawn <brian.t.dawn@gmail.com>
Brian J. Burg <burg@cs.washington.edu>
Brian Koropoff <bkoropoff@gmail.com>
Brian Leibig <brian.leibig@gmail.com>
Bruno de Oliveira Abinader <bruno.d@partner.samsung.com>
Bryan Dunsmore <dunsmoreb@gmail.com>
Byron Williams <byron@112percent.com>
Cadence Marseille <cadencemarseille@gmail.com>
Caitlin Potter <snowball@defpixel.com>
Cameron Zwarich <zwarich@mozilla.com>
Carl-Anton Ingmarsson <mail@carlanton.se>
Carlos <toqueteos@gmail.com>
Carol Nichols <carol.nichols@gmail.com>
Carol Willing <carolcode@willingconsulting.com>
Carter Tazio Schonwald <carter.schonwald@gmail.com>
Chris Double <chris.double@double.co.nz>
Chris Morgan <me@chrismorgan.info>
Chris Nixon <chris.nixon@sigma.me.uk>
Chris Peterson <cpeterson@mozilla.com>
Chris Pressey <cpressey@gmail.com>
Chris Sainty <csainty@hotmail.com>
Chris Shea <cmshea@gmail.com>
Chris Wong <lambda.fairy@gmail.com>
Christoph Burgdorf <christoph.burgdorf@bvsn.org>
Christopher Bergqvist <spambox0@digitalpoetry.se>
Christopher Kendell <ckendell@outlook.com>
Chuck Ries <chuck.ries@gmail.com>
Clark Gaebel <cg.wowus.cg@gmail.com>
Clinton Ryan <clint.ryan3@gmail.com>
Cody Schroeder <codys@cs.washington.edu>
Cole Mickens <cole.mickens@gmail.com>
Colin Davidson <colrdavidson@gmail.com>
Colin Sherratt <colin.sherratt@gmail.com>
Conrad Kleinespel <conradk@conradk.com>
Corey Ford <corey@coreyford.name>
Corey Richardson <corey@octayn.net>
DJUrsus <colinvh@divitu.com>
Damian Gryski <damian@gryski.com>
Damien Grassart <damien@grassart.com>
Damien Radtke <dradtke@channeliq.com>
Damien Schoof <damien.schoof@gmail.com>
Dan Albert <danalbert@google.com>
Dan Burkert <dan@danburkert.com>
Dan Connolly <dckc@madmode.com>
Dan Luu <danluu@gmail.com>
Daniel Brooks <db48x@db48x.net>
Daniel Fagnan <dnfagnan@gmail.com>
Daniel Farina <daniel@fdr.io>
Daniel Hofstetter <daniel.hofstetter@42dh.com>
Daniel Luz <dev@mernen.com>
Daniel MacDougall <dmacdougall@gmail.com>
Daniel Micay <danielmicay@gmail.com>
Daniel Patterson <dbp@riseup.net>
Daniel Ralston <Wubbulous@gmail.com>
Daniel Rosenwasser <DanielRosenwasser@gmail.com>
Daniel Ursache Dogariu <contact@danniel.net>
Dave Herman <dherman@mozilla.com>
Dave Hodder <dmh@dmh.org.uk>
David Creswick <dcrewi@gyrae.net>
David Forsythe <dforsythe@gmail.com>
David Halperin <halperin.dr@gmail.com>
David Klein <david.klein@baesystemsdetica.com>
David Manescu <david.manescu@gmail.com>
David Rajchenbach-Teller <dteller@mozilla.com>
David Renshaw <dwrenshaw@gmail.com>
David Vazgenovich Shakaryan <dvshakaryan@gmail.com>
Davis Silverman <sinistersnare@gmail.com>
Derecho <derecho@sector5d.org>
Derek Chiang <derekchiang93@gmail.com>
Derek Guenther <dguenther9@gmail.com>
Derek Harland <derek.harland@finq.co.nz>
Diego Ongaro <ongaro@cs.stanford.edu>
Diggory Hardy <diggory.hardy@gmail.com>
Dimitri Krassovski <labria@startika.com>
Dirk Leifeld <leifeld@posteo.de>
Dirkjan Bussink <d.bussink@gmail.com>
Div Shekhar <div@pagerduty.com>
Dmitry Ermolov <epdmitry@yandex.ru>
Dmitry Promsky <dmitry@willworkforcookies.com>
Dmitry Vasiliev <dima@hlabs.org>
Do Nhat Minh <mrordinaire@gmail.com>
Donovan Preston <donovanpreston@gmail.com>
Douglas Young <rcxdude@gmail.com>
Drew Willcoxon <adw@mozilla.com>
Dylan Braithwaite <dylanbraithwaite1@gmail.com>
Dzmitry Malyshau <kvarkus@gmail.com>
Eduard Bopp <eduard.bopp@aepsil0n.de>
Eduard Burtescu <edy.burt@gmail.com>
Eduardo Bautista <me@eduardobautista.com>
Edward Wang <edward.yu.wang@gmail.com>
Edward Z. Yang <ezyang@cs.stanford.edu>
Ehsanul Hoque <ehsanul@ehsanul.com>
Elliott Slaughter <elliottslaughter@gmail.com>
Elly Fong-Jones <elly@leptoquark.net>
Emanuel Rylke <ema-fox@web.de>
Eric Biggers <ebiggers3@gmail.com>
Eric Holk <eric.holk@gmail.com>
Eric Holmes <eric@ejholmes.net>
Eric Martin <e.a.martin1337@gmail.com>
Eric Reed <ecreed@cs.washington.edu>
Erick Tryzelaar <erick.tryzelaar@gmail.com>
Erik Lyon <elyon001@local.fake>
Erik Price <erik.price16@gmail.com>
Erik Rose <erik@mozilla.com>
Etienne Millon <me@emillon.org>
Eunchong Yu <kroisse@gmail.com>
Evan Klitzke <evan@eklitzke.org>
Evan McClanahan <evan@evanmcc.com>
Evgeny Sologubov
Fabian Deutsch <fabian.deutsch@gmx.de>
Fabrice Desré <fabrice@desre.org>
Falco Hirschenberger <falco.hirschenberger@gmail.com>
Fedor Indutny <fedor.indutny@gmail.com>
Felix Crux <felixc@felixcrux.com>
Felix Raimundo <felix.raimundo@telecom-paristech.fr>
Felix S. Klock II <pnkfelix@pnkfx.org>
Flaper Fesp <flaper87@gmail.com>
Flavio Percoco <flaper87@gmail.com>
Florian Gilcher <florian.gilcher@asquera.de>
Florian Hahn <flo@fhahn.com>
Florian Hartwig <florian.j.hartwig@gmail.com>
Florian Zeitz <florob@babelmonkeys.de>
Francisco Souza <f@souza.cc>
Franklin Chen <franklinchen@franklinchen.com>
Gabriel <g2p.code@gmail.com>
Gareth Daniel Smith <garethdanielsmith@gmail.com>
Gary Linscott <glinscott@gmail.com>
Gary M. Josack <gary@byoteki.com>
Gavin Baker <gavinb@antonym.org>
Geoff Hill <geoff@geoffhill.org>
Geoffroy Couprie <geo.couprie@gmail.com>
George Papanikolaou <g3orge.app@gmail.com>
Georges Dubus <georges.dubus@gmail.com>
Gioele Barabucci <gioele@svario.it>
Glenn Willen <gwillen@nerdnet.org>
Gonçalo Cabrita <_@gmcabrita.com>
Graham Fawcett <fawcett@uwindsor.ca>
Grahame Bowland <grahame@angrygoats.net>
Graydon Hoare <graydon@mozilla.com>
Grigoriy <ohaistarlight@gmail.com>
Guillaume Pinot <texitoi@texitoi.eu>
Gyorgy Andrasek <jurily@gmail.com>
Gábor Horváth <xazax.hun@gmail.com>
Gábor Lehel <glaebhoerl@gmail.com>
Haitao Li <lihaitao@gmail.com>
Hanno Braun <mail@hannobraun.de>
Harry Marr <harry.marr@gmail.com>
Heather <heather@cynede.net>
Herman J. Radtke III <hermanradtke@gmail.com>
HeroesGrave <heroesgrave@gmail.com>
Hong Chulju <ang0123dev@gmail.com>
Honza Strnad <hanny.strnad@gmail.com>
Hugo Jobling <hello@thisishugo.com>
Huon Wilson <dbau.pp+github@gmail.com>
Ian D. Bollinger <ian.bollinger@gmail.com>
Ian Daniher <it.daniher@gmail.com>
Igor Bukanov <igor@mir2.org>
Ilya Dmitrichenko <ilya@xively.com>
Ilyong Cho <ilyoan@gmail.com>
Isaac Aggrey <isaac.aggrey@gmail.com>
Isaac Dupree <antispam@idupree.com>
Ivan Enderlin <ivan.enderlin@hoa-project.net>
Ivan Petkov <ivanppetkov@gmail.com>
Ivano Coppola <rgbfirefox@gmail.com>
J. J. Weber <jjweber@gmail.com>
J.C. Moyer <jmoyer1992@gmail.com>
Jack Heizer <jack.heizer@gmail.com>
Jack Moffitt <jack@metajack.im>
Jacob Harris Cryer Kragh <jhckragh@gmail.com>
Jacob Hegna <jacobhegna@gmail.com>
Jacob Parker <j3parker@csclub.uwaterloo.ca>
Jaemin Moon <jaemin.moon@samsung.com>
Jag Talon <talon.jag@gmail.com>
Jake Kaufman <theevocater@gmail.com>
Jake Kerr <kodafox@gmail.com>
Jake Scott <jake.net@gmail.com>
Jakub Wieczorek <jakubw@jakubw.net>
James Deng <cnjamesdeng@gmail.com>
James Hurst <jamesrhurst@users.noreply.github.com>
James Lal <james@lightsofapollo.com>
James Laverack <james@jameslaverack.com>
James Miller <bladeon@gmail.com>
James Rowe <jroweboy@gmail.com>
James Sanders <sanderjd@gmail.com>
James Tranovich <james@openhorizonlabs.com>
Jan Kobler <eng1@koblersystems.de>
Jan Niklas Hasse <jhasse@gmail.com>
Jannis Harder <jix@jixco.de>
Jason Fager <jfager@gmail.com>
Jason Orendorff <jorendorff@mozilla.com>
Jason Thompson <jason@jthompson.ca>
Jason Toffaletti <jason@topsy.com>
Jauhien Piatlicki <jauhien@gentoo.org>
Jay Anderson <jayanderson0@gmail.com>
Jed Davis <jld@panix.com>
Jed Estep <aje@jhu.edu>
Jeff Balogh <jbalogh@mozilla.com>
Jeff Muizelaar <jmuizelaar@mozilla.com>
Jeff Olson <olson.jeffery@gmail.com>
Jeffrey Yasskin <jyasskin@gmail.com>
Jens Nockert <jens@nockert.se>
Jeong YunWon <jeong@youknowone.org>
Jeremy Letang <letang.jeremy@gmail.com>
Jesse Jones <jesse9jones@gmail.com>
Jesse Luehrs <doy@tozt.net>
Jesse Ray <jesse@localhost.localdomain>
Jesse Ruderman <jruderman@gmail.com>
Jihyun Yu <jihyun@nclab.kaist.ac.kr>
Jim Blandy <jimb@red-bean.com>
Jim Radford <radford@blackbean.org>
Jimmie Elvenmark <flugsio@gmail.com>
Jimmy Lu <jimmy.lu.2011@gmail.com>
Jimmy Zelinskie <jimmyzelinskie@gmail.com>
Joe Pletcher <joepletcher@gmail.com>
Joe Schafer <joe@jschaf.com>
Johannes Löthberg <johannes@kyriasis.com>
Johannes Muenzel <jmuenzel@gmail.com>
John Barker <jebarker@gmail.com>
John Clements <clements@racket-lang.org>
John Fresco <john.fresco@utah.edu>
John Gallagher <jgallagher@bignerdranch.com>
John Kåre Alsaker <john.kare.alsaker@gmail.com>
John Louis Walker <injyuw@gmail.com>
John Schmidt <john.schmidt.h@gmail.com>
John Simon <john@johnsoft.com>
Jon Morton <jonanin@gmail.com>
Jonas Hietala <tradet.h@gmail.com>
Jonathan Bailey <jbailey@mozilla.com>
Jonathan Boyett <jonathan@failingservers.com>
Jonathan Reem <jonathan.reem@gmail.com>
Jonathan S <gereeter@gmail.com>
Jonathan Sternberg <jonathansternberg@gmail.com>
Jordi Boggiano <j.boggiano@seld.be>
Jorge Aparicio <japaric@linux.com>
Joris Rehm <joris.rehm@wakusei.fr>
Joseph Crail <jbcrail@gmail.com>
Joseph Martin <pythoner6@gmail.com>
Josh Matthews <josh@joshmatthews.net>
Joshua Clark <joshua.clark@txstate.edu>
Joshua Wise <joshua@joshuawise.com>
Joshua Yanovski <pythonesque@gmail.com>
Julia Evans <julia@jvns.ca>
Julian Orth <ju.orth@gmail.com>
Junyoung Cho <june0.cho@samsung.com>
JustAPerson <jpriest8@ymail.com>
Justin Noah <justinnoah@gmail.com>
Jyun-Yan You <jyyou.tw@gmail.com>
Kang Seonghoon <kang.seonghoon@mearie.org>
Kasey Carrothers <kaseyc.808@gmail.com>
Keegan McAllister <kmcallister@mozilla.com>
Kelly Wilson <wilsonk@cpsc.ucalgary.ca>
Keshav Kini <keshav.kini@gmail.com>
Kevin Atkinson <kevina@cs.utah.edu>
Kevin Ballard <kevin@sb.org>
Kevin Butler <haqkrs@gmail.com>
Kevin Cantu <me@kevincantu.org>
Kevin Mehall <km@kevinmehall.net>
Kevin Murphy <kemurphy.cmu@gmail.com>
Kevin Walter <kevin.walter.private@googlemail.com>
Kiet Tran <ktt3ja@gmail.com>
Kyeongwoon Lee <kyeongwoon.lee@samsung.com>
Lars Bergstrom <lbergstrom@mozilla.com>
Laurent Bonnans <bonnans.l@gmail.com>
Lawrence Velázquez <larryv@alum.mit.edu>
Leah Hanson <astrieanna@gmail.com>
Lee Wondong <wdlee91@gmail.com>
LemmingAvalanche <haugsbakk@yahoo.no>
Lennart Kudling <github@kudling.de>
Léo Testard <leo.testard@gmail.com>
Liigo Zhuang <com.liigo@gmail.com>
Lindsey Kuper <lindsey@composition.al>
Luca Bruno <lucab@debian.org>
Luis de Bethencourt <luis@debethencourt.com>
Luqman Aden <me@luqman.ca>
Magnus Auvinen <magnus.auvinen@gmail.com>
Mahmut Bulut <mahmutbulut0@gmail.com>
Makoto Nakashima <makoto.nksm+github@gmail.com>
Manish Goregaokar <manishsmail@gmail.com>
Marcel Rodrigues <marcelgmr@gmail.com>
Margaret Meyerhofer <mmeyerho@andrew.cmu.edu>
Marijn Haverbeke <marijnh@gmail.com>
Mark Lacey <641@rudkx.com>
Mark Rowe <mrowe@bdash.net.nz>
Mark Sinclair <mark.edward.x@gmail.com>
Mark Vian <mrv.caseus@gmail.com>
Markus Unterwaditzer <markus@unterwaditzer.net>
Marti Raudsepp <marti@juffo.org>
Martin DeMello <martindemello@gmail.com>
Martin Olsson <martin@minimum.se>
Marvin Löbel <loebel.marvin@gmail.com>
Matej Lach <matej.lach@gmail.com>
Mateusz Czapliński <czapkofan@gmail.com>
Mathijs van de Nes <git@mathijs.vd-nes.nl>
Matt Brubeck <mbrubeck@limpet.net>
Matt Carberry <carberry.matt@gmail.com>
Matt Coffin <mcoffin13@gmail.com>
Matthew Auld <matthew.auld@intel.com>
Matthew Iselin <matthew@theiselins.net>
Matthew McPherrin <matthew@mcpherrin.ca>
Matthew O'Connor <thegreendragon@gmail.com>
Matthias Einwag <matthias.einwag@live.com>
Matthijs Hofstra <thiezz@gmail.com>
Matthijs van der Vleuten <git@zr40.nl>
Max Penet <max.penet@gmail.com>
Maxim Kolganov <kolganov.mv@gmail.com>
Meyer S. Jacobs <meyermagic@gmail.com>
Micah Chalmer <micah@micahchalmer.net>
Michael Arntzenius <daekharel@gmail.com>
Michael Bebenita <mbebenita@mozilla.com>
Michael Dagitses <dagitses@google.com>
Michael Darakananda <pongad@gmail.com>
Michael Fairley <michaelfairley@gmail.com>
Michael Kainer <kaini1123@gmail.com>
Michael Letterle <michael.letterle@gmail.com>
Michael Matuzak <mmatuzak@gmail.com>
Michael Neumann <mneumann@ntecs.de>
Michael Pratt <michael@pratt.im>
Michael Reinhard <mcreinhard@users.noreply.github.com>
Michael Sproul <micsproul@gmail.com>
Michael Sullivan <sully@msully.net>
Michael Williams <m.t.williams@live.com>
Michael Woerister <michaelwoerister@gmail>
Michael Zhou <moz@google.com>
Mick Koch <kchmck@gmail.com>
Mickaël Delahaye <mickael.delahaye@gmail.com>
Mihnea Dobrescu-Balaur <mihnea@linux.com>
Mike Boutin <mike.boutin@gmail.com>
Mike Robinson <mikeprobinsonuk@gmail.com>
Mikko Perttunen <cyndis@kapsi.fi>
Ms2ger <ms2ger@gmail.com>
Mukilan Thiagarajan <mukilanthiagarajan@gmail.com>
NODA, Kai <nodakai@gmail.com>
Nathan Froyd <froydnj@gmail.com>
Nathan Typanski <ntypanski@gmail.com>
Nathaniel Herman <nherman@college.harvard.edu>
NiccosSystem <niccossystem@gmail.com>
Nick Cameron <ncameron@mozilla.com>
Nick Desaulniers <ndesaulniers@mozilla.com>
Nick Howell <howellnick@gmail.com>
Nicolas Silva <nical.silva@gmail.com>
Niels langager Ellegaard <niels.ellegaard@gmail.com>
Nif Ward <nif.ward@gmail.com>
Nikita Pekin <contact@nikitapek.in>
Niklas Koep <niklas.koep@gmail.com>
Niko Matsakis <niko@alum.mit.edu>
Noam Yorav-Raphael <noamraph@gmail.com>
Noufal Ibrahim <noufal@nibrahim.net.in>
O S K Chaitanya <osk@medhas.org>
OGINO Masanori <masanori.ogino@gmail.com>
Olivier Saut <osaut@airpost.net>
Olle Jonsson <olle.jonsson@gmail.com>
Or Brostovski <tohava@gmail.com>
Oren Hazi <oren.hazi@gmail.com>
Orphée Lafond-Lummis <o@orftz.com>
P1start <rewi-github@whanau.org>
Pablo Brasero <pablo@pablobm.com>
Palmer Cox <p@lmercox.com>
Paolo Falabella <paolo.falabella@gmail.com>
Patrick Reisert <kpreisert@gmail.com>
Patrick Walton <pcwalton@mimiga.net>
Patrick Yevsukov <patrickyevsukov@users.noreply.github.com>
Patrik Kårlin <patrik.karlin@gmail.com>
Paul Collins <paul@ondioline.org>
Paul Stansifer <paul.stansifer@gmail.com>
Paul Woolcock <pwoolcoc+github@gmail.com>
Pavel Panchekha <me@pavpanchekha.com>
Pawel Olzacki <p.olzacki2@samsung.com>
Peer Aramillo Irizar <peer.aramillo.irizar@gmail.com>
Peter Atashian <retep998@gmail.com>
Peter Hull <peterhull90@gmail.com>
Peter Marheine <peter@taricorp.net>
Peter Williams <peter@newton.cx>
Peter Zotov <whitequark@whitequark.org>
Petter Remen <petter.remen@gmail.com>
Phil Dawes <phil@phildawes.net>
Phil Ruffwind <rf@rufflewind.com>
Philipp Brüschweiler <blei42@gmail.com>
Philipp Gesang <phg42.2a@gmail.com>
Piotr Czarnecki <pioczarn@gmail.com>
Piotr Jawniak <sawyer47@gmail.com>
Piotr Zolnierek <pz@anixe.pl>
Pradeep Kumar <gohanpra@gmail.com>
Prudhvi Krishna Surapaneni <me@prudhvi.net>
Pythoner6 <pythoner6@gmail.com>
Q.P.Liu <qpliu@yahoo.com>
Rafael Ávila de Espíndola <respindola@mozilla.com>
Ralph Bodenner <rkbodenner+github@gmail.com>
Ralph Giles <giles@thaumas.net>
Ramkumar Ramachandra <artagnon@gmail.com>
Randati <anttivan@gmail.com>
Raphael Catolino <raphael.catolino@gmail.com>
Raphael Speyer <rspeyer@gmail.com>
Reilly Watson <reillywatson@gmail.com>
Renato Riccieri Santos Zannon <renato@rrsz.com.br>
Renato Zannon <renato@rrsz.com.br>
Reuben Morais <reuben.morais@gmail.com>
Ricardo M. Correia <rcorreia@wizy.org>
Rich Lane <rlane@club.cc.cmu.edu>
Richard Diamond <wichard@vitalitystudios.com>
Richo Healey <richo@psych0tik.net>
Rick Waldron <waldron.rick@gmail.com>
Rob Arnold <robarnold@cs.cmu.edu>
Rob Hoelz <rob@hoelz.ro>
Robert Buonpastore <robert.buonpastore@gmail.com>
Robert Clipsham <robert@octarineparrot.com>
Robert Gawdzik <rgawdzik@hotmail.com>
Robert Irelan <rirelan@gmail.com>
Robert Knight <robertknight@gmail.com>
Robert Millar <robert.millar@cantab.net>
Roland Tanglao <roland@rolandtanglao.com>
Ron Dahlgren <ronald.dahlgren@gmail.com>
Roy Frostig <rfrostig@mozilla.com>
Russell <rpjohnst@gmail.com>
Ruud van Asseldonk <dev@veniogames.com>
Ryan Mulligan <ryan@ryantm.com>
Ryan Scheel <ryan.havvy@gmail.com>
Ryman <haqkrs@gmail.com>
Rüdiger Sonderfeld <ruediger@c-plusplus.de>
S Pradeep Kumar <gohanpra@gmail.com>
Salem Talha <salem.a.talha@gmail.com>
Samuel Chase <samebchase@gmail.com>
Samuel Neves <sneves@dei.uc.pt>
Sander Mathijs van Veen <smvv@kompiler.org>
Sangeun Kim <sammy.kim@samsung.com>
Sankha Narayan Guria <sankha93@gmail.com>
Santiago Pastorino <santiago@wyeworks.com>
Santiago Rodriguez <sanrodari@gmail.com>
Saurabh Anand <saurabhanandiit@gmail.com>
Scott Jenkins <scottdjwales@gmail.com>
Scott Lawrence <bytbox@gmail.com>
Sean Chalmers <sclhiannan@gmail.com>
Sean Gillespie <sean.william.g@gmail.com>
Sean McArthur <sean.monstar@gmail.com>
Sean Moon <ssamoon@ucla.edu>
Sean Stangl <sstangl@mozilla.com>
Sebastian N. Fernandez <cachobot@gmail.com>
Sebastian Zaha <sebastian.zaha@gmail.com>
Sebastien Martini <seb@dbzteam.org>
Seo Sanghyeon <sanxiyn@gmail.com>
Seonghyun Kim <sh8281.kim@samsung.com>
Sergio Benitez <sbenitez@mit.edu>
Seth Pink <sethpink@gmail.com>
Shamir Khodzha <khodzha.sh@gmail.com>
SiegeLord <slabode@aim.com>
Simon Barber-Dueck <sbarberdueck@gmail.com>
Simon Persson <simon@flaskpost.org>
Simon Sapin <simon@exyr.org>
Squeaky <squeaky_pl@gmx.com>
Stefan Plantikow <stefan.plantikow@googlemail.com>
Stepan Koltsov <stepan.koltsov@gmail.com>
Sterling Greene <sterling.greene@gmail.com>
Steve Klabnik <steve@steveklabnik.com>
Steven De Coeyer <steven@banteng.be>
Steven Fackler <sfackler@gmail.com>
Steven Sheldon <steven@sasheldon.com>
Steven Stewart-Gallus <sstewartgallus00@langara.bc.ca>
Strahinja Val Markovic <val@markovic.io>
Stuart Pernsteiner <spernsteiner@mozilla.com>
Sylvestre Ledru <sylvestre@debian.org>
Sébastien Chauvel <eichi237@mailoo.org>
Sébastien Crozet <developer@crozet.re>
Sébastien Paolacci <sebastien.paolacci@gmail.com>
Taras Shpot <mrshpot@gmail.com>
Ted Horst <ted.horst@earthlink.net>
Thad Guidry <thadguidry@gmail.com>
Thomas Backman <serenity@exscape.org>
Thomas Daede <daede003@umn.edu>
Till Hoeppner <till@hoeppner.ws>
Tim Brooks <tim.brooks@staples.com>
Tim Chevalier <chevalier@alum.wellesley.edu>
Tim Joseph Dumol <tim@timdumol.com>
Tim Kuehn <tkuehn@cmu.edu>
Tim Taubert <tim@timtaubert.de>
Timothée Ravier <tim@siosm.fr>
Tobba <tobias.haegermarck@gmail.com>
Tobias Bucher <tobiasbucher5991@gmail.com>
Tohava <tohava@tohava-laptop.(none)>
Tom Jakubowski <tom@crystae.net>
Tom Lee <github@tomlee.co>
Tomas Sedovic <tomas@sedovic.cz>
Tommy M. McGuire <mcguire@crsr.net>
Tomoki Aonuma <uasi@99cm.org>
Tony Young <tony@rfw.name>
Torsten Weber <TorstenWeber12@gmail.com>
Trent Ogren <tedwardo2@gmail.com>
Trinick <slicksilver555@mac.com>
Tshepang Lekhonkhobe <tshepang@gmail.com>
Tuncer Ayaz <tuncer.ayaz@gmail.com>
TyOverby <ty@pre-alpha.com>
Tycho Sci <tychosci@gmail.com>
Tyler Bindon <martica@martica.org>
U-NOV2010\eugals
Utkarsh Kukreti <utkarshkukreti@gmail.com>
Uwe Dauernheim <uwe@dauernheim.net>
Vadim Chugunov <vadimcn@gmail.com>
Valentin Tsatskin <vtsatskin@mozilla.com>
Valerii Hiora <valerii.hiora@gmail.com>
Victor Berger <victor.berger@m4x.org>
Vijay Korapaty <rust@korapaty.com>
Viktor Dahl <pazaconyoman@gmail.com>
Vincent Belliard <vincent@famillebelliard.fr>
Vinzent Steinberg <Vinzent.Steinberg@gmail.com>
Virgile Andreani <virgile.andreani@anbuco.fr>
Vivek Galatage <vivekgalatage@gmail.com>
Vladimir Pouzanov <farcaller@gmail.com>
Volker Mische <volker.mische@gmail.com>
Wade Mealing <wmealing@gmail.com>
WebeWizard <webewizard@gmail.com>
Wendell Smith <wendell.smith@yale.edu>
William Ting <io@williamting.com>
Yasuhiro Fujii <y-fujii@mimosa-pudica.net>
Yazhong Liu <yorkiefixer@gmail.com>
Yehuda Katz <wycats@gmail.com>
Young-il Choi <duddlf.choi@samsung.com>
Youngmin Yoo <youngmin.yoo@samsung.com>
Youngsoo Son <ysson83@gmail.com>
Yuri Albuquerque <yuridenommus@gmail.com>
Yuri Kunde Schlesner <yuriks@yuriks.net>
Zach Kamsler <smoo.master@gmail.com>
Zach Pomerantz <zmp@umich.edu>
Zack Corr <zack@z0w0.me>
Zack Slayton <zack.slayton@gmail.com>
Zbigniew Siciarz <antyqjon@gmail.com>
Ziad Hatahet <hatahet@gmail.com>
Zooko Wilcox-O'Hearn <zooko@zooko.com>
aochagavia <aochagavia92@gmail.com>
auREAX <mark@xn--hwg34fba.ws>
b1nd <clint.ryan3@gmail.com>
bachm <Ab@vapor.com>
blake2-ppc <ulrik.sverdrup@gmail.com>
bors <bors@rust-lang.org>
chitra
chromatic <chromatic@wgz.org>
comex <comexk@gmail.com>
darkf <lw9k123@gmail.com>
dgoon <dgoon@dgoon.net>
donkopotamus <general@chocolate-fish.com>
eliovir <eliovir@gmail.com>
flo-l <lacknerflo@gmail.com>
fort <e@mail.com>
free-Runner <aali07@students.poly.edu>
g3xzh <g3xzh@yahoo.com>
gamazeps <gamaz3ps@gmail.com>
gentlefolk <cemacken@gmail.com>
gifnksm <makoto.nksm@gmail.com>
hansjorg <hansjorg@gmail.com>
iancormac84 <wilnathan@gmail.com>
inrustwetrust <inrustwetrust@users.noreply.github.com>
jamesluke <jamesluke@users.noreply.github.com>
jmgrosen <jmgrosen@gmail.com>
joaoxsouls <joaoxsouls@gmail.com>
klutzy <klutzytheklutzy@gmail.com>
korenchkin <korenchkin2@gmail.com>
kvark <kvarkus@gmail.com>
kwantam <kwantam@gmail.com>
lpy <pylaurent1314@gmail.com>
lucy <ne.tetewi@gmail.com>
lyuts <dioxinu@gmail.com>
m-r-r <raybaudroigm@gmail.com>
maikklein <maikklein@googlemail.com>
masklinn <github.com@masklinn.net>
mdinger <mdinger.bugzilla@gmail.com>
mitchmindtree <mitchell.nordine@gmail.com>
moonglum <moonglum@moonbeamlabs.com>
mr.Shu <mr@shu.io>
mrec <mike.capp@gmail.com>
musitdev <philippe.delrieu@free.fr>
nham <hamann.nick@gmail.com>
noam <noam@clusterfoo.com>
novalis <novalis@novalis.org>
osa1 <omeragacan@gmail.com>
reedlepee <reedlepee123@gmail.com>
sevrak <sevrak@rediffmail.com>
smenardpw <sebastien@knoglr.com>
sp3d <sp3d@github>
startling <tdixon51793@gmail.com>
theptrk <patrick.tran06@gmail.com>
tinaun <tinagma@gmail.com>
ville-h <ville3.14159@gmail.com>
wickerwaka <martin.donlon@gmail.com>
xales <xales@naveria.com>
zofrex <zofrex@gmail.com>
zslayton <zack.slayton@gmail.com>
zzmp <zmp@umich.edu>

View File

@ -0,0 +1,22 @@
Short version for non-lawyers:
The Rust Project is dual-licensed under Apache 2.0 and MIT
terms.
Longer version:
The Rust Project is copyright 2014, The Rust Project
Developers (given in the file AUTHORS.txt).
Licensed under the Apache License, Version 2.0
<LICENSE-APACHE or
http://www.apache.org/licenses/LICENSE-2.0> or the MIT
license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
at your option. All files in the project carrying such
notice may not be copied, modified, or distributed except
according to those terms.
The Rust Project includes packages written by third parties,
but they are not included in this distribution.

View File

@ -0,0 +1,720 @@
// Copied from:
// rust/src/librustrt/mutex.rs
// git: 7828c3dd2858d8f3a0448484d8093e22719dbda0
// No changes.
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A native mutex and condition variable type.
//!
//! This module contains bindings to the platform's native mutex/condition
//! variable primitives. It provides two types: `StaticNativeMutex`, which can
//! be statically initialized via the `NATIVE_MUTEX_INIT` value, and a simple
//! wrapper `NativeMutex` that has a destructor to clean up after itself. These
//! objects serve as both mutexes and condition variables simultaneously.
//!
//! The static lock is lazily initialized, but it can only be unsafely
//! destroyed. A statically initialized lock doesn't necessarily have a time at
//! which it can get deallocated. For this reason, there is no `Drop`
//! implementation of the static mutex, but rather the `destroy()` method must
//! be invoked manually if destruction of the mutex is desired.
//!
//! The non-static `NativeMutex` type does have a destructor, but cannot be
//! statically initialized.
//!
//! These types are not recommended for idiomatic Rust use. They are
//! appropriate where no other options are available, but other Rust
//! concurrency primitives should be preferred: the `sync` crate defines
//! `StaticMutex` and `Mutex` types.
//!
//! # Example
//!
//! ```rust
//! use std::rt::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT};
//!
//! // Use a statically initialized mutex
//! static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
//!
//! unsafe {
//! let _guard = LOCK.lock();
//! } // automatically unlocked here
//!
//! // Use a normally initialized mutex
//! unsafe {
//! let mut lock = NativeMutex::new();
//!
//! {
//! let _guard = lock.lock();
//! } // unlocked here
//!
//! // sometimes the RAII guard isn't appropriate
//! lock.lock_noguard();
//! lock.unlock_noguard();
//! } // `lock` is deallocated here
//! ```
#![allow(non_camel_case_types)]
use core::prelude::*;
/// A native mutex suitable for storing in statics (that is, it has
/// the `destroy` method rather than a destructor).
///
/// Prefer the `NativeMutex` type where possible, since that does not
/// require manual deallocation.
pub struct StaticNativeMutex {
inner: imp::Mutex,
}
/// A native mutex with a destructor for clean-up.
///
/// See `StaticNativeMutex` for a version that is suitable for storing in
/// statics.
pub struct NativeMutex {
inner: StaticNativeMutex
}
/// Automatically unlocks the mutex that it was created from on
/// destruction.
///
/// Using this makes lock-based code resilient to unwinding/task
/// panic, because the lock will be automatically unlocked even
/// then.
#[must_use]
pub struct LockGuard<'a> {
lock: &'a StaticNativeMutex
}
pub const NATIVE_MUTEX_INIT: StaticNativeMutex = StaticNativeMutex {
inner: imp::MUTEX_INIT,
};
impl StaticNativeMutex {
/// Creates a new mutex.
///
/// Note that a mutex created in this way needs to be explicitly
/// freed with a call to `destroy` or it will leak.
/// It is also important to avoid locking the mutex until it has stopped moving.
pub unsafe fn new() -> StaticNativeMutex {
StaticNativeMutex { inner: imp::Mutex::new() }
}
/// Acquires this lock. This assumes that the current thread does not
/// already hold the lock.
///
/// # Example
///
/// ```rust
/// use std::rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
/// static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
/// unsafe {
/// let _guard = LOCK.lock();
/// // critical section...
/// } // automatically unlocked in `_guard`'s destructor
/// ```
///
/// # Unsafety
///
/// This method is unsafe because it will not function correctly if this
/// mutex has been *moved* since it was last used. The mutex can move an
/// arbitrary number of times before its first usage, but once a mutex has
/// been used once it is no longer allowed to move (or otherwise it invokes
/// undefined behavior).
///
/// Additionally, this type does not take into account any form of
/// scheduling model. This will unconditionally block the *os thread* which
/// is not always desired.
pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
self.inner.lock();
LockGuard { lock: self }
}
/// Attempts to acquire the lock. The value returned is `Some` if
/// the attempt succeeded.
///
/// # Unsafety
///
/// This method is unsafe for the same reasons as `lock`.
pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
if self.inner.trylock() {
Some(LockGuard { lock: self })
} else {
None
}
}
/// Acquire the lock without creating a `LockGuard`.
///
/// This needs to be paired with a call to `.unlock_noguard`. Prefer using
/// `.lock`.
///
/// # Unsafety
///
/// This method is unsafe for the same reasons as `lock`. Additionally, this
/// does not guarantee that the mutex will ever be unlocked, and it is
/// undefined to drop an already-locked mutex.
pub unsafe fn lock_noguard(&self) { self.inner.lock() }
/// Attempts to acquire the lock without creating a
/// `LockGuard`. The value returned is whether the lock was
/// acquired or not.
///
/// If `true` is returned, this needs to be paired with a call to
/// `.unlock_noguard`. Prefer using `.trylock`.
///
/// # Unsafety
///
/// This method is unsafe for the same reasons as `lock_noguard`.
pub unsafe fn trylock_noguard(&self) -> bool {
self.inner.trylock()
}
/// Unlocks the lock. This assumes that the current thread already holds the
/// lock.
///
/// # Unsafety
///
/// This method is unsafe for the same reasons as `lock`. Additionally, it
/// is not guaranteed that this is unlocking a previously locked mutex. It
/// is undefined to unlock an unlocked mutex.
pub unsafe fn unlock_noguard(&self) { self.inner.unlock() }
/// Block on the internal condition variable.
///
/// This function assumes that the lock is already held. Prefer
/// using `LockGuard.wait` since that guarantees that the lock is
/// held.
///
/// # Unsafety
///
/// This method is unsafe for the same reasons as `lock`. Additionally, this
/// is unsafe because the mutex may not be currently locked.
pub unsafe fn wait_noguard(&self) { self.inner.wait() }
/// Signals a thread in `wait` to wake up
///
/// # Unsafety
///
/// This method is unsafe for the same reasons as `lock`. Additionally, this
/// is unsafe because the mutex may not be currently locked.
pub unsafe fn signal_noguard(&self) { self.inner.signal() }
/// This function is especially unsafe because there are no guarantees made
/// that no other thread is currently holding the lock or waiting on the
/// condition variable contained inside.
pub unsafe fn destroy(&self) { self.inner.destroy() }
}
impl NativeMutex {
/// Creates a new mutex.
///
/// The user must be careful to ensure the mutex is not locked when it is
/// being destroyed.
/// It is also important to avoid locking the mutex until it has stopped moving.
pub unsafe fn new() -> NativeMutex {
NativeMutex { inner: StaticNativeMutex::new() }
}
/// Acquires this lock. This assumes that the current thread does not
/// already hold the lock.
///
/// # Example
///
/// ```rust
/// use std::rt::mutex::NativeMutex;
/// unsafe {
/// let mut lock = NativeMutex::new();
///
/// {
/// let _guard = lock.lock();
/// // critical section...
/// } // automatically unlocked in `_guard`'s destructor
/// }
/// ```
///
/// # Unsafety
///
/// This method is unsafe due to the same reasons as
/// `StaticNativeMutex::lock`.
pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
self.inner.lock()
}
/// Attempts to acquire the lock. The value returned is `Some` if
/// the attempt succeeded.
///
/// # Unsafety
///
/// This method is unsafe due to the same reasons as
/// `StaticNativeMutex::trylock`.
pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
self.inner.trylock()
}
/// Acquire the lock without creating a `LockGuard`.
///
/// This needs to be paired with a call to `.unlock_noguard`. Prefer using
/// `.lock`.
///
/// # Unsafety
///
/// This method is unsafe due to the same reasons as
/// `StaticNativeMutex::lock_noguard`.
pub unsafe fn lock_noguard(&self) { self.inner.lock_noguard() }
/// Attempts to acquire the lock without creating a
/// `LockGuard`. The value returned is whether the lock was
/// acquired or not.
///
/// If `true` is returned, this needs to be paired with a call to
/// `.unlock_noguard`. Prefer using `.trylock`.
///
/// # Unsafety
///
/// This method is unsafe due to the same reasons as
/// `StaticNativeMutex::trylock_noguard`.
pub unsafe fn trylock_noguard(&self) -> bool {
self.inner.trylock_noguard()
}
/// Unlocks the lock. This assumes that the current thread already holds the
/// lock.
///
/// # Unsafety
///
/// This method is unsafe due to the same reasons as
/// `StaticNativeMutex::unlock_noguard`.
pub unsafe fn unlock_noguard(&self) { self.inner.unlock_noguard() }
/// Block on the internal condition variable.
///
/// This function assumes that the lock is already held. Prefer
/// using `LockGuard.wait` since that guarantees that the lock is
/// held.
///
/// # Unsafety
///
/// This method is unsafe due to the same reasons as
/// `StaticNativeMutex::wait_noguard`.
pub unsafe fn wait_noguard(&self) { self.inner.wait_noguard() }
/// Signals a thread in `wait` to wake up
///
/// # Unsafety
///
/// This method is unsafe due to the same reasons as
/// `StaticNativeMutex::signal_noguard`.
pub unsafe fn signal_noguard(&self) { self.inner.signal_noguard() }
}
impl Drop for NativeMutex {
fn drop(&mut self) {
unsafe {self.inner.destroy()}
}
}
impl<'a> LockGuard<'a> {
/// Block on the internal condition variable.
pub unsafe fn wait(&self) {
self.lock.wait_noguard()
}
/// Signals a thread in `wait` to wake up.
pub unsafe fn signal(&self) {
self.lock.signal_noguard()
}
}
#[unsafe_destructor]
impl<'a> Drop for LockGuard<'a> {
fn drop(&mut self) {
unsafe {self.lock.unlock_noguard()}
}
}
#[cfg(unix)]
mod imp {
use libc;
use self::os::{PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
pthread_mutex_t, pthread_cond_t};
use core::cell::UnsafeCell;
type pthread_mutexattr_t = libc::c_void;
type pthread_condattr_t = libc::c_void;
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
mod os {
use libc;
pub type pthread_mutex_t = *mut libc::c_void;
pub type pthread_cond_t = *mut libc::c_void;
pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t =
0 as pthread_mutex_t;
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t =
0 as pthread_cond_t;
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
mod os {
use libc;
#[cfg(target_arch = "x86_64")]
const __PTHREAD_MUTEX_SIZE__: uint = 56;
#[cfg(target_arch = "x86_64")]
const __PTHREAD_COND_SIZE__: uint = 40;
#[cfg(target_arch = "x86")]
const __PTHREAD_MUTEX_SIZE__: uint = 40;
#[cfg(target_arch = "x86")]
const __PTHREAD_COND_SIZE__: uint = 24;
#[cfg(target_arch = "arm")]
const __PTHREAD_MUTEX_SIZE__: uint = 40;
#[cfg(target_arch = "arm")]
const __PTHREAD_COND_SIZE__: uint = 24;
const _PTHREAD_MUTEX_SIG_INIT: libc::c_long = 0x32AAABA7;
const _PTHREAD_COND_SIG_INIT: libc::c_long = 0x3CB0B1BB;
#[repr(C)]
pub struct pthread_mutex_t {
__sig: libc::c_long,
__opaque: [u8, ..__PTHREAD_MUTEX_SIZE__],
}
#[repr(C)]
pub struct pthread_cond_t {
__sig: libc::c_long,
__opaque: [u8, ..__PTHREAD_COND_SIZE__],
}
pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
__sig: _PTHREAD_MUTEX_SIG_INIT,
__opaque: [0, ..__PTHREAD_MUTEX_SIZE__],
};
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
__sig: _PTHREAD_COND_SIG_INIT,
__opaque: [0, ..__PTHREAD_COND_SIZE__],
};
}
#[cfg(target_os = "linux")]
mod os {
use libc;
// minus 8 because we have an 'align' field
#[cfg(target_arch = "x86_64")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 40 - 8;
#[cfg(target_arch = "x86")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
#[cfg(target_arch = "arm")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
#[cfg(target_arch = "mips")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
#[cfg(target_arch = "mipsel")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
#[cfg(target_arch = "x86_64")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[cfg(target_arch = "x86")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[cfg(target_arch = "arm")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[cfg(target_arch = "mips")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[cfg(target_arch = "mipsel")]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[repr(C)]
pub struct pthread_mutex_t {
__align: libc::c_longlong,
size: [u8, ..__SIZEOF_PTHREAD_MUTEX_T],
}
#[repr(C)]
pub struct pthread_cond_t {
__align: libc::c_longlong,
size: [u8, ..__SIZEOF_PTHREAD_COND_T],
}
pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
__align: 0,
size: [0, ..__SIZEOF_PTHREAD_MUTEX_T],
};
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
__align: 0,
size: [0, ..__SIZEOF_PTHREAD_COND_T],
};
}
#[cfg(target_os = "android")]
mod os {
use libc;
#[repr(C)]
pub struct pthread_mutex_t { value: libc::c_int }
#[repr(C)]
pub struct pthread_cond_t { value: libc::c_int }
pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
value: 0,
};
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
value: 0,
};
}
pub struct Mutex {
lock: UnsafeCell<pthread_mutex_t>,
cond: UnsafeCell<pthread_cond_t>,
}
pub const MUTEX_INIT: Mutex = Mutex {
lock: UnsafeCell { value: PTHREAD_MUTEX_INITIALIZER },
cond: UnsafeCell { value: PTHREAD_COND_INITIALIZER },
};
impl Mutex {
pub unsafe fn new() -> Mutex {
// As the mutex might be moved while its address is changing, it
// is better to avoid initializing the potentially
// opaque OS data before it has landed
let m = Mutex {
lock: UnsafeCell::new(PTHREAD_MUTEX_INITIALIZER),
cond: UnsafeCell::new(PTHREAD_COND_INITIALIZER),
};
return m;
}
pub unsafe fn lock(&self) { pthread_mutex_lock(self.lock.get()); }
pub unsafe fn unlock(&self) { pthread_mutex_unlock(self.lock.get()); }
pub unsafe fn signal(&self) { pthread_cond_signal(self.cond.get()); }
pub unsafe fn wait(&self) {
pthread_cond_wait(self.cond.get(), self.lock.get());
}
pub unsafe fn trylock(&self) -> bool {
pthread_mutex_trylock(self.lock.get()) == 0
}
pub unsafe fn destroy(&self) {
pthread_mutex_destroy(self.lock.get());
pthread_cond_destroy(self.cond.get());
}
}
extern {
fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> libc::c_int;
fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> libc::c_int;
fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> libc::c_int;
fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> libc::c_int;
fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> libc::c_int;
fn pthread_cond_wait(cond: *mut pthread_cond_t,
lock: *mut pthread_mutex_t) -> libc::c_int;
fn pthread_cond_signal(cond: *mut pthread_cond_t) -> libc::c_int;
}
}
#[cfg(windows)]
mod imp {
use alloc::heap;
use core::atomic;
use core::ptr;
use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR};
use libc;
type LPCRITICAL_SECTION = *mut c_void;
const SPIN_COUNT: DWORD = 4000;
#[cfg(target_arch = "x86")]
const CRIT_SECTION_SIZE: uint = 24;
#[cfg(target_arch = "x86_64")]
const CRIT_SECTION_SIZE: uint = 40;
pub struct Mutex {
// pointers for the lock/cond handles, atomically updated
lock: atomic::AtomicUint,
cond: atomic::AtomicUint,
}
pub const MUTEX_INIT: Mutex = Mutex {
lock: atomic::INIT_ATOMIC_UINT,
cond: atomic::INIT_ATOMIC_UINT,
};
impl Mutex {
pub unsafe fn new() -> Mutex {
Mutex {
lock: atomic::AtomicUint::new(init_lock()),
cond: atomic::AtomicUint::new(init_cond()),
}
}
pub unsafe fn lock(&self) {
EnterCriticalSection(self.getlock() as LPCRITICAL_SECTION)
}
pub unsafe fn trylock(&self) -> bool {
TryEnterCriticalSection(self.getlock() as LPCRITICAL_SECTION) != 0
}
pub unsafe fn unlock(&self) {
LeaveCriticalSection(self.getlock() as LPCRITICAL_SECTION)
}
pub unsafe fn wait(&self) {
self.unlock();
WaitForSingleObject(self.getcond() as HANDLE, libc::INFINITE);
self.lock();
}
pub unsafe fn signal(&self) {
assert!(SetEvent(self.getcond() as HANDLE) != 0);
}
/// This function is especially unsafe because there are no guarantees made
/// that no other thread is currently holding the lock or waiting on the
/// condition variable contained inside.
pub unsafe fn destroy(&self) {
let lock = self.lock.swap(0, atomic::SeqCst);
let cond = self.cond.swap(0, atomic::SeqCst);
if lock != 0 { free_lock(lock) }
if cond != 0 { free_cond(cond) }
}
unsafe fn getlock(&self) -> *mut c_void {
match self.lock.load(atomic::SeqCst) {
0 => {}
n => return n as *mut c_void
}
let lock = init_lock();
match self.lock.compare_and_swap(0, lock, atomic::SeqCst) {
0 => return lock as *mut c_void,
_ => {}
}
free_lock(lock);
return self.lock.load(atomic::SeqCst) as *mut c_void;
}
unsafe fn getcond(&self) -> *mut c_void {
match self.cond.load(atomic::SeqCst) {
0 => {}
n => return n as *mut c_void
}
let cond = init_cond();
match self.cond.compare_and_swap(0, cond, atomic::SeqCst) {
0 => return cond as *mut c_void,
_ => {}
}
free_cond(cond);
return self.cond.load(atomic::SeqCst) as *mut c_void;
}
}
pub unsafe fn init_lock() -> uint {
let block = heap::allocate(CRIT_SECTION_SIZE, 8) as *mut c_void;
InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
return block as uint;
}
pub unsafe fn init_cond() -> uint {
return CreateEventA(ptr::null_mut(), libc::FALSE, libc::FALSE,
ptr::null()) as uint;
}
pub unsafe fn free_lock(h: uint) {
DeleteCriticalSection(h as LPCRITICAL_SECTION);
heap::deallocate(h as *mut u8, CRIT_SECTION_SIZE, 8);
}
pub unsafe fn free_cond(h: uint) {
let block = h as HANDLE;
libc::CloseHandle(block);
}
#[allow(non_snake_case)]
extern "system" {
fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
bManualReset: BOOL,
bInitialState: BOOL,
lpName: LPCSTR) -> HANDLE;
fn InitializeCriticalSectionAndSpinCount(
lpCriticalSection: LPCRITICAL_SECTION,
dwSpinCount: DWORD) -> BOOL;
fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL;
fn SetEvent(hEvent: HANDLE) -> BOOL;
fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
}
}
#[cfg(test)]
mod test {
use std::prelude::*;
use std::mem::drop;
use super::{StaticNativeMutex, NATIVE_MUTEX_INIT};
use std::rt::thread::Thread;
#[test]
fn smoke_lock() {
static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
unsafe {
let _guard = LK.lock();
}
}
#[test]
fn smoke_cond() {
static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
unsafe {
let guard = LK.lock();
let t = Thread::start(proc() {
let guard = LK.lock();
guard.signal();
});
guard.wait();
drop(guard);
t.join();
}
}
#[test]
fn smoke_lock_noguard() {
static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
unsafe {
LK.lock_noguard();
LK.unlock_noguard();
}
}
#[test]
fn smoke_cond_noguard() {
static LK: StaticNativeMutex = NATIVE_MUTEX_INIT;
unsafe {
LK.lock_noguard();
let t = Thread::start(proc() {
LK.lock_noguard();
LK.signal_noguard();
LK.unlock_noguard();
});
LK.wait_noguard();
LK.unlock_noguard();
t.join();
}
}
#[test]
fn destroy_immediately() {
unsafe {
let m = StaticNativeMutex::new();
m.destroy();
}
}
}
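
One subtlety in the Windows `imp` above: `MUTEX_INIT` is a constant and cannot allocate, so the critical section and event handles are created lazily on first use. Two threads may race to initialize the same handle; the `compare_and_swap` arbitrates, and the loser frees its speculative handle and adopts the winner's. Here is `getlock` again, restated with comments (same logic as above, no new behavior):

unsafe fn getlock(&self) -> *mut c_void {
    // Fast path: 0 means "not created yet", anything else is the handle.
    match self.lock.load(atomic::SeqCst) {
        0 => {}
        n => return n as *mut c_void
    }
    // Slow path: build a handle speculatively, then try to publish it.
    let lock = init_lock();
    match self.lock.compare_and_swap(0, lock, atomic::SeqCst) {
        0 => return lock as *mut c_void, // we won the race
        _ => {}                          // another thread published first
    }
    free_lock(lock); // discard the speculative handle
    return self.lock.load(atomic::SeqCst) as *mut c_void;
}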

View File

@ -0,0 +1,274 @@
// Copied from:
// rust/src/librustrt/stack.rs
// git: 70cef9474a3307ec763efc01fe6969e542083823
// stack_exhausted() function deleted, no other changes.
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Rust stack-limit management
//!
//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
//! overflow for Rust tasks. In this scheme, the prologue of every function is
//! preceded by a check to see whether the current stack limits are being
//! exceeded.
//!
//! This module provides the functionality necessary in order to manage these
//! stack limits (which are stored in platform-specific locations). The
//! functions here are used at the borders of the task lifetime in order to
//! manage these limits.
//!
//! This is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stacks
// There is a pool of reserved slots for Apple internal use (0..119)
// First dynamic allocated pthread key starts with 257 (on iOS7)
// So using slot 149 should be pretty safe ASSUMING space is reserved
// for every key < first dynamic key
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// (ranges 80..89 and 110..119), especially considering the fact that Garbage
// Collection was never supposed to work on iOS. But, as everybody knows, there
// is a chance that those slots will be re-used, as happened with key 95 (moved
// from JavaScriptCore to CoreText)
//
// Unfortunately Apple rejected the patch to LLVM which generated the
// corresponding prologue, so the decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack, indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location where the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning, any further function calls may
/// trigger the morestack logic if you're not careful.
///
/// Also note that this and all of the inner functions are flagged as
/// "inline(always)" because they're messing around with the stack limits. It
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were actual function calls).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $$0x48+90*4, %eax
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
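///
/// A minimal sketch of a manual check (hypothetical; compiled code normally
/// performs this comparison in the __morestack prologue):
///
/// ```ignore
/// let sp = &() as *const () as uint; // rough current stack pointer
/// let enough_stack = unsafe { sp > get_sp_limit() };
/// ```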
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
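// Stack checks by limit are disabled on Windows (see stack_overflow.rs),
// so hand back a small dummy value.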
return 1024;
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
return limit;
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl $$0x48+90*4, %eax
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
use libc::c_void;
return get_sp_limit() as uint;
extern {
fn get_sp_limit() -> *const c_void;
}
}
// iOS doesn't support segmented stacks yet. The runtime might still call
// this function, though, so it is not safe to mark it as unreachable;
// return a fixed constant instead.
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
1024
}
}

View File

@ -0,0 +1,401 @@
// Copied from:
// rust/src/librustrt/stack_overflow.rs
// git: 7828c3dd2858d8f3a0448484d8093e22719dbda0
// report() changed to always call core::intrinsics::abort()
// get_task_guard_page() changed to always return Some(0)
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
use core::prelude::*;
use core::intrinsics::abort;
use libc;
pub unsafe fn init() {
imp::init();
}
pub unsafe fn cleanup() {
imp::cleanup();
}
pub struct Handler {
_data: *mut libc::c_void
}
impl Handler {
pub unsafe fn new() -> Handler {
imp::make_handler()
}
}
impl Drop for Handler {
fn drop(&mut self) {
unsafe {
imp::drop_handler(self);
}
}
}
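// A minimal usage sketch: thread.rs installs one Handler per spawned thread
// and lets RAII tear it down when the thread's body returns, e.g.
//
//     let handler = unsafe { Handler::new() };
//     // ... run the thread body ...
//     drop(handler); // on Linux/OS X this munmaps the alternate signal stack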
pub unsafe fn report() {
// Don't try anything clever, just kill the whole process.
abort();
}
// get_task_guard_page is called from an exception / signal handler.
// It returns the guard page of the current task or 0 if that guard page
// doesn't exist. None is returned if there's currently no local task.
// (This copy always returns Some(0); see the header note above.)
#[cfg(any(windows, target_os = "linux", target_os = "macos"))]
unsafe fn get_task_guard_page() -> Option<uint>
{
Some(0)
}
#[cfg(windows)]
#[allow(non_snake_case)]
mod imp {
use core::ptr;
use core::mem;
use libc;
use libc::types::os::arch::extra::{LPVOID, DWORD, LONG, BOOL};
use stack;
use super::{Handler, get_task_guard_page, report};
// This is initialized in init() and only read from afterwards
static mut PAGE_SIZE: uint = 0;
#[no_stack_check]
extern "system" fn vectored_handler(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG {
unsafe {
let rec = &(*(*ExceptionInfo).ExceptionRecord);
let code = rec.ExceptionCode;
if code != EXCEPTION_STACK_OVERFLOW {
return EXCEPTION_CONTINUE_SEARCH;
}
// We're calling into functions with stack checks; however, stack
// checks by limit should be disabled on Windows
stack::record_sp_limit(0);
if get_task_guard_page().is_some() {
report();
}
EXCEPTION_CONTINUE_SEARCH
}
}
pub unsafe fn init() {
let mut info = mem::zeroed();
libc::GetSystemInfo(&mut info);
PAGE_SIZE = info.dwPageSize as uint;
if AddVectoredExceptionHandler(0, vectored_handler) == ptr::null_mut() {
panic!("failed to install exception handler");
}
mem::forget(make_handler());
}
pub unsafe fn cleanup() {
}
pub unsafe fn make_handler() -> Handler {
if SetThreadStackGuarantee(&mut 0x5000) == 0 {
panic!("failed to reserve stack space for exception handling");
}
super::Handler { _data: 0i as *mut libc::c_void }
}
pub unsafe fn drop_handler(_handler: &mut Handler) {
}
pub struct EXCEPTION_RECORD {
pub ExceptionCode: DWORD,
pub ExceptionFlags: DWORD,
pub ExceptionRecord: *mut EXCEPTION_RECORD,
pub ExceptionAddress: LPVOID,
pub NumberParameters: DWORD,
pub ExceptionInformation: [LPVOID, ..EXCEPTION_MAXIMUM_PARAMETERS]
}
pub struct EXCEPTION_POINTERS {
pub ExceptionRecord: *mut EXCEPTION_RECORD,
pub ContextRecord: LPVOID
}
pub type PVECTORED_EXCEPTION_HANDLER = extern "system"
fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG;
pub type ULONG = libc::c_ulong;
const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
const EXCEPTION_MAXIMUM_PARAMETERS: uint = 15;
const EXCEPTION_STACK_OVERFLOW: DWORD = 0xc00000fd;
extern "system" {
fn AddVectoredExceptionHandler(FirstHandler: ULONG,
VectoredHandler: PVECTORED_EXCEPTION_HANDLER)
-> LPVOID;
fn SetThreadStackGuarantee(StackSizeInBytes: *mut ULONG) -> BOOL;
}
}
#[cfg(any(target_os = "linux", target_os = "macos"))]
mod imp {
use core::prelude::*;
use super::super::stack;
use super::{Handler, get_task_guard_page, report};
use core::mem;
use core::ptr;
use core::intrinsics;
use self::signal::{siginfo, sigaction, SIGBUS, SIG_DFL,
SA_SIGINFO, SA_ONSTACK, sigaltstack,
SIGSTKSZ};
use libc;
use libc::funcs::posix88::mman::{mmap, munmap};
use libc::consts::os::posix88::{SIGSEGV,
PROT_READ,
PROT_WRITE,
MAP_PRIVATE,
MAP_ANON,
MAP_FAILED};
// This is initialized in init() and only read from afterwards
static mut PAGE_SIZE: uint = 0;
#[no_stack_check]
unsafe extern fn signal_handler(signum: libc::c_int,
info: *mut siginfo,
_data: *mut libc::c_void) {
// We cannot return from a SIGSEGV or SIGBUS signal.
// See: https://www.gnu.org/software/libc/manual/html_node/Handler-Returns.html
unsafe fn term(signum: libc::c_int) -> ! {
use core::mem::transmute;
signal(signum, transmute(SIG_DFL));
raise(signum);
intrinsics::abort();
}
// We're calling into functions with stack checks
stack::record_sp_limit(0);
match get_task_guard_page() {
Some(guard) => {
let addr = (*info).si_addr as uint;
if guard == 0 || addr < guard - PAGE_SIZE || addr >= guard {
term(signum);
}
report();
intrinsics::abort()
}
None => term(signum)
}
}
static mut MAIN_ALTSTACK: *mut libc::c_void = 0 as *mut libc::c_void;
pub unsafe fn init() {
let psize = libc::sysconf(libc::consts::os::sysconf::_SC_PAGESIZE);
if psize == -1 {
panic!("failed to get page size");
}
PAGE_SIZE = psize as uint;
let mut action: sigaction = mem::zeroed();
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
action.sa_sigaction = signal_handler as sighandler_t;
sigaction(SIGSEGV, &action, ptr::null_mut());
sigaction(SIGBUS, &action, ptr::null_mut());
let handler = make_handler();
MAIN_ALTSTACK = handler._data;
mem::forget(handler);
}
pub unsafe fn cleanup() {
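// Rebuilding a Handler from the saved pointer and letting it drop
// immediately munmaps the main thread's alternate signal stack.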
Handler { _data: MAIN_ALTSTACK };
}
pub unsafe fn make_handler() -> Handler {
let alt_stack = mmap(ptr::null_mut(),
signal::SIGSTKSZ,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON,
-1,
0);
if alt_stack == MAP_FAILED {
panic!("failed to allocate an alternative stack");
}
let mut stack: sigaltstack = mem::zeroed();
stack.ss_sp = alt_stack;
stack.ss_flags = 0;
stack.ss_size = SIGSTKSZ;
sigaltstack(&stack, ptr::null_mut());
Handler { _data: alt_stack }
}
pub unsafe fn drop_handler(handler: &mut Handler) {
munmap(handler._data, SIGSTKSZ);
}
type sighandler_t = *mut libc::c_void;
#[cfg(any(all(target_os = "linux", target_arch = "x86"), // may not match
all(target_os = "linux", target_arch = "x86_64"),
all(target_os = "linux", target_arch = "arm"), // may not match
target_os = "android"))] // may not match
mod signal {
use libc;
use super::sighandler_t;
pub static SA_ONSTACK: libc::c_int = 0x08000000;
pub static SA_SIGINFO: libc::c_int = 0x00000004;
pub static SIGBUS: libc::c_int = 7;
pub static SIGSTKSZ: libc::size_t = 8192;
pub static SIG_DFL: sighandler_t = 0i as sighandler_t;
// This definition is not as accurate as it could be: `si_addr` is
// actually part of a giant union. Currently we're only interested in
// that field, however.
#[repr(C)]
pub struct siginfo {
si_signo: libc::c_int,
si_errno: libc::c_int,
si_code: libc::c_int,
pub si_addr: *mut libc::c_void
}
#[repr(C)]
pub struct sigaction {
pub sa_sigaction: sighandler_t,
pub sa_mask: sigset_t,
pub sa_flags: libc::c_int,
sa_restorer: *mut libc::c_void,
}
#[cfg(target_word_size = "32")]
#[repr(C)]
pub struct sigset_t {
__val: [libc::c_ulong, ..32],
}
#[cfg(target_word_size = "64")]
#[repr(C)]
pub struct sigset_t {
__val: [libc::c_ulong, ..16],
}
#[repr(C)]
pub struct sigaltstack {
pub ss_sp: *mut libc::c_void,
pub ss_flags: libc::c_int,
pub ss_size: libc::size_t
}
}
#[cfg(target_os = "macos")]
mod signal {
use libc;
use super::sighandler_t;
pub const SA_ONSTACK: libc::c_int = 0x0001;
pub const SA_SIGINFO: libc::c_int = 0x0040;
pub const SIGBUS: libc::c_int = 10;
pub const SIGSTKSZ: libc::size_t = 131072;
pub const SIG_DFL: sighandler_t = 0i as sighandler_t;
pub type sigset_t = u32;
// This structure has more fields, but we're not all that interested in
// them.
#[repr(C)]
pub struct siginfo {
pub si_signo: libc::c_int,
pub si_errno: libc::c_int,
pub si_code: libc::c_int,
pub pid: libc::pid_t,
pub uid: libc::uid_t,
pub status: libc::c_int,
pub si_addr: *mut libc::c_void
}
#[repr(C)]
pub struct sigaltstack {
pub ss_sp: *mut libc::c_void,
pub ss_size: libc::size_t,
pub ss_flags: libc::c_int
}
#[repr(C)]
pub struct sigaction {
pub sa_sigaction: sighandler_t,
pub sa_mask: sigset_t,
pub sa_flags: libc::c_int,
}
}
extern {
pub fn signal(signum: libc::c_int, handler: sighandler_t) -> sighandler_t;
pub fn raise(signum: libc::c_int) -> libc::c_int;
pub fn sigaction(signum: libc::c_int,
act: *const sigaction,
oldact: *mut sigaction) -> libc::c_int;
pub fn sigaltstack(ss: *const sigaltstack,
oss: *mut sigaltstack) -> libc::c_int;
}
}
#[cfg(not(any(target_os = "linux",
target_os = "macos",
windows)))]
mod imp {
use libc;
pub unsafe fn init() {
}
pub unsafe fn cleanup() {
}
pub unsafe fn make_handler() -> super::Handler {
super::Handler { _data: 0i as *mut libc::c_void }
}
pub unsafe fn drop_handler(_handler: &mut super::Handler) {
}
}

View File

@ -0,0 +1,554 @@
// Copied from:
// rust/src/librustrt/thread.rs
// git: 7828c3dd2858d8f3a0448484d8093e22719dbda0
// Module paths updated to be under rust_wrapper, no other changes.
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Native OS-thread management
//!
//! This module contains bindings necessary for managing OS-level threads.
//! These functions operate outside of the Rust runtime, creating threads
//! which are not used for scheduling in any way.
#![allow(non_camel_case_types)]
use core::prelude::*;
use alloc::boxed::Box;
use core::mem;
use core::uint;
use libc;
use rust_wrapper::stack;
use rust_wrapper::stack_overflow;
pub unsafe fn init() {
imp::guard::init();
stack_overflow::init();
}
pub unsafe fn cleanup() {
stack_overflow::cleanup();
}
#[cfg(target_os = "windows")]
type StartFn = extern "system" fn(*mut libc::c_void) -> imp::rust_thread_return;
#[cfg(not(target_os = "windows"))]
type StartFn = extern "C" fn(*mut libc::c_void) -> imp::rust_thread_return;
/// This struct represents a native thread's state. It is used to join on an
/// existing thread created in the joinable state.
pub struct Thread<T> {
native: imp::rust_thread,
joined: bool,
packet: Box<Option<T>>,
}
static DEFAULT_STACK_SIZE: uint = 1024 * 1024;
// This is the starting point of Rust OS threads. The first thing we do
// is make sure that we don't trigger __morestack (which is also why this
// has a no_stack_check annotation); then we extract the main function
// and invoke it.
#[no_stack_check]
fn start_thread(main: *mut libc::c_void) -> imp::rust_thread_return {
unsafe {
stack::record_os_managed_stack_bounds(0, uint::MAX);
let handler = stack_overflow::Handler::new();
let f: Box<proc()> = mem::transmute(main);
(*f)();
drop(handler);
mem::transmute(0 as imp::rust_thread_return)
}
}
#[no_stack_check]
#[cfg(target_os = "windows")]
extern "system" fn thread_start(main: *mut libc::c_void) -> imp::rust_thread_return {
return start_thread(main);
}
#[no_stack_check]
#[cfg(not(target_os = "windows"))]
extern fn thread_start(main: *mut libc::c_void) -> imp::rust_thread_return {
return start_thread(main);
}
/// Returns the last writable byte of the main thread's stack next to the guard
/// page. Must be called from the main thread.
pub fn main_guard_page() -> uint {
unsafe {
imp::guard::main()
}
}
/// Returns the last writable byte of the current thread's stack next to the
/// guard page. Must not be called from the main thread.
pub fn current_guard_page() -> uint {
unsafe {
imp::guard::current()
}
}
// There are two impl blocks because if T were specified at the top then
// it would be a pain to specify a type parameter on Thread::spawn (which
// doesn't need the type parameter).
impl Thread<()> {
/// Starts execution of a new OS thread.
///
/// This function will not wait for the thread to join, but a handle to the
/// thread will be returned.
///
/// Note that the handle returned is used to acquire the return value of the
/// procedure `main`. The `join` function will wait for the thread to finish
/// and return the value that `main` generated.
///
/// Also note that the `Thread` returned will *always* wait for the thread
/// to finish executing. This means that even if `join` is not explicitly
/// called, when the `Thread` falls out of scope its destructor will block
/// waiting for the OS thread.
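///
/// A small usage sketch (mirrors the tests at the bottom of this file):
///
/// ```ignore
/// let thread = Thread::start(proc() { 1i + 1 });
/// assert_eq!(thread.join(), 2);
/// ```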
pub fn start<T: Send>(main: proc():Send -> T) -> Thread<T> {
Thread::start_stack(DEFAULT_STACK_SIZE, main)
}
/// Performs the same functionality as `start`, but specifies an explicit
/// stack size for the new thread.
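///
/// For example (hypothetical size; undersized requests are rounded up to
/// the platform minimum internally):
///
/// ```ignore
/// let t = Thread::start_stack(64 * 1024, proc() 42i);
/// assert_eq!(t.join(), 42);
/// ```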
pub fn start_stack<T: Send>(stack: uint, main: proc():Send -> T) -> Thread<T> {
// We need the address of the packet that `main` fills in to remain
// stable, so that it is still valid when `main` runs; allocate an
// extra box for that purpose.
let packet = box None;
let packet2: *mut Option<T> = unsafe {
*mem::transmute::<&Box<Option<T>>, *const *mut Option<T>>(&packet)
};
let main = proc() unsafe { *packet2 = Some(main()); };
let native = unsafe { imp::create(stack, box main) };
Thread {
native: native,
joined: false,
packet: packet,
}
}
/// This will spawn a new thread, but it will not wait for the thread to
/// finish, nor is it possible to wait for the thread to finish.
///
/// This corresponds to creating threads in the 'detached' state on unix
/// systems. Note that platforms may not keep the main program alive even if
/// there are detached threads still running around.
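///
/// For example:
///
/// ```ignore
/// Thread::spawn(proc() {
///     // detached background work; this thread can never be joined
/// });
/// ```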
pub fn spawn(main: proc():Send) {
Thread::spawn_stack(DEFAULT_STACK_SIZE, main)
}
/// Performs the same functionality as `spawn`, but explicitly specifies a
/// stack size for the new thread.
pub fn spawn_stack(stack: uint, main: proc():Send) {
unsafe {
let handle = imp::create(stack, box main);
imp::detach(handle);
}
}
/// Relinquishes the CPU slot that this OS-thread is currently using,
/// allowing another thread to run for a while.
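///
/// ```ignore
/// Thread::yield_now(); // give another thread a chance to run
/// ```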
pub fn yield_now() {
unsafe { imp::yield_now(); }
}
}
impl<T: Send> Thread<T> {
/// Wait for this thread to finish, returning the result of the thread's
/// calculation.
pub fn join(mut self) -> T {
assert!(!self.joined);
unsafe { imp::join(self.native) };
self.joined = true;
assert!(self.packet.is_some());
self.packet.take().unwrap()
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Thread<T> {
fn drop(&mut self) {
// This is required for correctness. If this is not done then the thread
// would fill in a return box which no longer exists.
if !self.joined {
unsafe { imp::join(self.native) };
}
}
}
#[cfg(windows)]
#[allow(non_snake_case)]
mod imp {
use core::prelude::*;
use alloc::boxed::Box;
use core::cmp;
use core::mem;
use core::ptr;
use libc;
use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
LPVOID, DWORD, LPDWORD, HANDLE};
use stack::RED_ZONE;
pub type rust_thread = HANDLE;
pub type rust_thread_return = DWORD;
pub mod guard {
pub unsafe fn main() -> uint {
0
}
pub unsafe fn current() -> uint {
0
}
pub unsafe fn init() {
}
}
pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
let arg: *mut libc::c_void = mem::transmute(p);
// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
// PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
// For now, the only requirement is that it's big enough to hold the
// red zone. Round up to the next 64 kB because that's what the NT
// kernel does; we might as well make it explicit. With the current
// 20 kB red zone, that makes for a 64 kB minimum stack.
let stack_size = (cmp::max(stack, RED_ZONE) + 0xffff) & !0xffff;
let ret = CreateThread(ptr::null_mut(), stack_size as libc::size_t,
super::thread_start, arg, 0, ptr::null_mut());
if ret as uint == 0 {
// be sure to not leak the closure
let _p: Box<proc():Send> = mem::transmute(arg);
panic!("failed to spawn native thread: {}", ret);
}
return ret;
}
pub unsafe fn join(native: rust_thread) {
use libc::consts::os::extra::INFINITE;
WaitForSingleObject(native, INFINITE);
}
pub unsafe fn detach(native: rust_thread) {
assert!(libc::CloseHandle(native) != 0);
}
pub unsafe fn yield_now() {
// This function will return 0 if there are no other threads to execute,
// but in that case the yield was pointless anyway, so this isn't really
// a case that needs to be worried about.
SwitchToThread();
}
#[allow(non_snake_case)]
extern "system" {
fn CreateThread(lpThreadAttributes: LPSECURITY_ATTRIBUTES,
dwStackSize: SIZE_T,
lpStartAddress: super::StartFn,
lpParameter: LPVOID,
dwCreationFlags: DWORD,
lpThreadId: LPDWORD) -> HANDLE;
fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
fn SwitchToThread() -> BOOL;
}
}
#[cfg(unix)]
mod imp {
use core::prelude::*;
use alloc::boxed::Box;
use core::cmp;
use core::mem;
use core::ptr;
use libc::consts::os::posix01::{PTHREAD_CREATE_JOINABLE, PTHREAD_STACK_MIN};
use libc;
use super::super::stack::RED_ZONE;
pub type rust_thread = libc::pthread_t;
pub type rust_thread_return = *mut u8;
#[cfg(all(not(target_os = "linux"), not(target_os = "macos")))]
pub mod guard {
pub unsafe fn current() -> uint {
0
}
pub unsafe fn main() -> uint {
0
}
pub unsafe fn init() {
}
}
#[cfg(any(target_os = "linux", target_os = "macos"))]
pub mod guard {
use super::*;
#[cfg(any(target_os = "linux", target_os = "android"))]
use core::mem;
#[cfg(any(target_os = "linux", target_os = "android"))]
use core::ptr;
use libc;
use libc::funcs::posix88::mman::{mmap};
use libc::consts::os::posix88::{PROT_NONE,
MAP_PRIVATE,
MAP_ANON,
MAP_FAILED,
MAP_FIXED};
// These are initialized in init() and only read from afterwards
static mut PAGE_SIZE: uint = 0;
static mut GUARD_PAGE: uint = 0;
#[cfg(target_os = "macos")]
unsafe fn get_stack_start() -> *mut libc::c_void {
current() as *mut libc::c_void
}
#[cfg(any(target_os = "linux", target_os = "android"))]
unsafe fn get_stack_start() -> *mut libc::c_void {
let mut attr: libc::pthread_attr_t = mem::zeroed();
if pthread_getattr_np(pthread_self(), &mut attr) != 0 {
panic!("failed to get thread attributes");
}
let mut stackaddr = ptr::null_mut();
let mut stacksize = 0;
if pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize) != 0 {
panic!("failed to get stack information");
}
if pthread_attr_destroy(&mut attr) != 0 {
panic!("failed to destroy thread attributes");
}
stackaddr
}
pub unsafe fn init() {
let psize = libc::sysconf(libc::consts::os::sysconf::_SC_PAGESIZE);
if psize == -1 {
panic!("failed to get page size");
}
PAGE_SIZE = psize as uint;
let stackaddr = get_stack_start();
// Remap the last page of the stack as PROT_NONE.
// This ensures SIGBUS will be raised on
// stack overflow.
let result = mmap(stackaddr,
PAGE_SIZE as libc::size_t,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1,
0);
if result != stackaddr || result == MAP_FAILED {
panic!("failed to allocate a guard page");
}
let offset = if cfg!(target_os = "linux") {
2
} else {
1
};
GUARD_PAGE = stackaddr as uint + offset * PAGE_SIZE;
}
pub unsafe fn main() -> uint {
GUARD_PAGE
}
#[cfg(target_os = "macos")]
pub unsafe fn current() -> uint {
(pthread_get_stackaddr_np(pthread_self()) as libc::size_t -
pthread_get_stacksize_np(pthread_self())) as uint
}
#[cfg(any(target_os = "linux", target_os = "android"))]
pub unsafe fn current() -> uint {
let mut attr: libc::pthread_attr_t = mem::zeroed();
if pthread_getattr_np(pthread_self(), &mut attr) != 0 {
panic!("failed to get thread attributes");
}
let mut guardsize = 0;
if pthread_attr_getguardsize(&attr, &mut guardsize) != 0 {
panic!("failed to get stack guard page");
}
if guardsize == 0 {
panic!("there is no guard page");
}
let mut stackaddr = ptr::null_mut();
let mut stacksize = 0;
if pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize) != 0 {
panic!("failed to get stack information");
}
if pthread_attr_destroy(&mut attr) != 0 {
panic!("failed to destroy thread attributes");
}
stackaddr as uint + guardsize as uint
}
}
pub unsafe fn create(stack: uint, p: Box<proc():Send>) -> rust_thread {
let mut native: libc::pthread_t = mem::zeroed();
let mut attr: libc::pthread_attr_t = mem::zeroed();
assert_eq!(pthread_attr_init(&mut attr), 0);
assert_eq!(pthread_attr_setdetachstate(&mut attr,
PTHREAD_CREATE_JOINABLE), 0);
// Reserve room for the red zone, the runtime's stack of last resort.
let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr) as uint);
match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
0 => {
},
libc::EINVAL => {
// EINVAL means |stack_size| is either too small or not a
// multiple of the system page size. Because it's definitely
// >= PTHREAD_STACK_MIN, it must be an alignment issue.
// Round up to the nearest page and try again.
let page_size = libc::sysconf(libc::_SC_PAGESIZE) as uint;
let stack_size = (stack_size + page_size - 1) & !(page_size - 1);
assert_eq!(pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t), 0);
},
errno => {
// This cannot really happen.
panic!("pthread_attr_setstacksize() error: {}", errno);
},
};
let arg: *mut libc::c_void = mem::transmute(p);
let ret = pthread_create(&mut native, &attr, super::thread_start, arg);
assert_eq!(pthread_attr_destroy(&mut attr), 0);
if ret != 0 {
// be sure to not leak the closure
let _p: Box<proc():Send> = mem::transmute(arg);
panic!("failed to spawn native thread: {}", ret);
}
native
}
pub unsafe fn join(native: rust_thread) {
assert_eq!(pthread_join(native, ptr::null_mut()), 0);
}
pub unsafe fn detach(native: rust_thread) {
assert_eq!(pthread_detach(native), 0);
}
pub unsafe fn yield_now() { assert_eq!(sched_yield(), 0); }
// glibc >= 2.15 has a __pthread_get_minstack() function that returns
// PTHREAD_STACK_MIN plus however many bytes are needed for thread-local
// storage. We need that information to avoid blowing up when a small stack
// is created in an application with big thread-local storage requirements.
// See #6233 for rationale and details.
//
// Link weakly to the symbol for compatibility with older versions of glibc.
// Assumes that we've been dynamically linked to libpthread, but that is
// currently always the case. Note that you need to check that the symbol
// is non-null before calling it!
#[cfg(target_os = "linux")]
fn min_stack_size(attr: *const libc::pthread_attr_t) -> libc::size_t {
type F = unsafe extern "C" fn(*const libc::pthread_attr_t) -> libc::size_t;
extern {
#[linkage = "extern_weak"]
static __pthread_get_minstack: *const ();
}
if __pthread_get_minstack.is_null() {
PTHREAD_STACK_MIN
} else {
unsafe { mem::transmute::<*const (), F>(__pthread_get_minstack)(attr) }
}
}
// __pthread_get_minstack() is marked as weak but extern_weak linkage is
// not supported on OS X, hence this kludge...
#[cfg(not(target_os = "linux"))]
fn min_stack_size(_: *const libc::pthread_attr_t) -> libc::size_t {
PTHREAD_STACK_MIN
}
#[cfg(any(target_os = "linux"))]
extern {
pub fn pthread_self() -> libc::pthread_t;
pub fn pthread_getattr_np(native: libc::pthread_t,
attr: *mut libc::pthread_attr_t) -> libc::c_int;
pub fn pthread_attr_getguardsize(attr: *const libc::pthread_attr_t,
guardsize: *mut libc::size_t) -> libc::c_int;
pub fn pthread_attr_getstack(attr: *const libc::pthread_attr_t,
stackaddr: *mut *mut libc::c_void,
stacksize: *mut libc::size_t) -> libc::c_int;
}
#[cfg(target_os = "macos")]
extern {
pub fn pthread_self() -> libc::pthread_t;
pub fn pthread_get_stackaddr_np(thread: libc::pthread_t) -> *mut libc::c_void;
pub fn pthread_get_stacksize_np(thread: libc::pthread_t) -> libc::size_t;
}
extern {
fn pthread_create(native: *mut libc::pthread_t,
attr: *const libc::pthread_attr_t,
f: super::StartFn,
value: *mut libc::c_void) -> libc::c_int;
fn pthread_join(native: libc::pthread_t,
value: *mut *mut libc::c_void) -> libc::c_int;
fn pthread_attr_init(attr: *mut libc::pthread_attr_t) -> libc::c_int;
pub fn pthread_attr_destroy(attr: *mut libc::pthread_attr_t) -> libc::c_int;
fn pthread_attr_setstacksize(attr: *mut libc::pthread_attr_t,
stack_size: libc::size_t) -> libc::c_int;
fn pthread_attr_setdetachstate(attr: *mut libc::pthread_attr_t,
state: libc::c_int) -> libc::c_int;
fn pthread_detach(thread: libc::pthread_t) -> libc::c_int;
fn sched_yield() -> libc::c_int;
}
}
#[cfg(test)]
mod tests {
use super::Thread;
#[test]
fn smoke() { Thread::start(proc (){}).join(); }
#[test]
fn data() { assert_eq!(Thread::start(proc () { 1i }).join(), 1); }
#[test]
fn detached() { Thread::spawn(proc () {}) }
#[test]
fn small_stacks() {
assert_eq!(42i, Thread::start_stack(0, proc () 42i).join());
assert_eq!(42i, Thread::start_stack(1, proc () 42i).join());
}
}