Skip to content
Snippets Groups Projects
Commit 7187cc16 authored by Neil Gershenfeld's avatar Neil Gershenfeld
Browse files

wip

parent e3439d09
Branches
No related tags found
No related merge requests found
Pipeline #47066 passed
......@@ -15,6 +15,7 @@
|1,090|[numbapig.py](Python/numbapig.py)|Python, Numba, CUDA, 5120 cores|NVIDIA V100|March, 2020|
|1,062|[taichipi.py](Python/taichipi.py)|Python, Taichi, 5120 cores|NVIDIA V100|March, 2023|
|811|prior|C, MPI, 2048 processes|Cray XT4|prior|
|484|[threadpi.rs](Rust/threadpi.rs)|Rust, 96 threads and cores<br>cargo run --release -- 96|Graviton4|December, 2024|
|315|[numbapip.py](Python/numbapip.py)|Python, Numba, parallel, fastmath<br>96 cores|Intel 2x Xeon Platinum 8175M|February, 2020|
|272|[threadpi.c](C/threadpi.c)|C, 96 threads<br>gcc threadpi.c -o threadpi -O3 -ffast-math -pthread|Intel 2x Xeon Platinum 8175M|June, 2019|
|267|[threadpi.cpp](C++/threadpi.cpp)|C++, 96 threads<br>g++ threadpi.cpp -o threadpi -O3 -ffast-math -pthread|Intel 2x Xeon Platinum 8175M|March, 2020|
......@@ -31,9 +32,11 @@
|15.7|[clusterpi.js](Node/clusterpi.js)|Node, 6 workers|Intel i7-8700T|December, 2018|
|9.37|[pi.c](C/pi.c)|C<br>gcc pi.c -o pi -lm -O3 -ffast-math|Intel i7-8700T|November, 2018|
|4.87|[numbapi.py](Python/numbapi.py)|Python, Numba|Intel i7-8700T|February, 2020|
|4.63|[pi.c](C/pi.c)|C<br>gcc pi.c -o pi -lm -O3|Intel i7-8700T|December, 2024|
|3.73|[pi.html](https://pub.pages.cba.mit.edu/pi/JavaScript/pi.html)|JavaScript, 1 worker|Intel i7-8700T|November, 2018|
|3.47|[pi.html](https://pub.pages.cba.mit.edu/pi/JavaScript/pi.html)|JavaScript, 1 worker|Intel 2x E5-2680|November, 2018|
|3.29|[pi.js](Node/pi.js)|Node|Intel i7-8700T|December, 2018|
|3.19|[pi.rs](Rust/pi.rs)|Rust<br>cargo run --release|Intel i7-8700T|December, 2024|
|3.12|[clusterpi.js](Node/clusterpi.js)|Node, 1 worker|Intel i7-8700T|December, 2018|
|1.78|[threadpi.c](C/threadpi.c)|C, 4 threads<br>gcc threadpi.c -o threadpi -O3 -ffast-math -pthread|Raspberry Pi 4|December, 2020|
|1.21|[cupi.py](Python/cupi.py)|Python, CuPy, 5120 cores|NVIDIA V100|March, 2023|
......
/*
* pi.rs
* Neil Gershenfeld 12/21/24
* Rust pi calculation benchmark
* pi = 3.14159265358979323846
*/
use std::time::SystemTime;
const NPTS:u64 = 1e9 as u64;
/// Entry point: accumulates NPTS terms of the series
///   pi = sum_{i>=1} 0.5 / ((i - 0.75) * (i - 0.25))
/// then reports elapsed wall-clock time and an estimated MFlops rate,
/// counting 5 floating-point operations per loop iteration.
fn main() {
    let a:f64 = 0.5;
    let b:f64 = 0.75;
    let c:f64 = 0.25;
    let mut pi:f64 = 0.0;
    // wall-clock timestamp in microseconds since the Unix epoch
    let start = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_micros();
    for i in 1..=NPTS {
        pi += a/(((i as f64)-b)*((i as f64)-c));
    }
    let end = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_micros();
    // elapsed seconds; dt is already f64, so no re-cast is needed below
    let dt = ((end-start) as f64)/1e6;
    let mflops = (NPTS as f64)*5.0/(dt*1e6);
    println!("NPTS = {NPTS}, pi = {pi}");
    println!("time = {dt:.3}, estimated MFlops = {mflops:.0}");
}
/*
* threadpi.rs
* Neil Gershenfeld 12/22/24
* Rust threads parallel pi calculation benchmark
* pi = 3.14159265358979323846
*/
use std::thread;
use std::env;
use std::process;
use std::time::SystemTime;
const NPTS:u64 = 1e9 as u64;
/// Entry point: spawns `number_of_threads` worker threads, each of which
/// sums NPTS terms of the series pi = sum 0.5/((i-0.75)*(i-0.25)) over its
/// own disjoint index range, then joins the partial sums and reports
/// elapsed time and estimated MFlops.
/// Note: total work scales with the thread count (weak scaling), which is
/// why the MFlops estimate multiplies by num_threads.
fn main() {
    let args:Vec<String> = env::args().collect();
    if args.len() != 2 {
        println!("command line: threadpi.rs number_of_threads");
        process::exit(-1);
    }
    // Reject non-numeric input and a zero thread count with the usage
    // message instead of panicking on a bare unwrap().
    let num_threads:u64 = match args[1].parse() {
        Ok(n) if n > 0 => n,
        _ => {
            println!("command line: threadpi.rs number_of_threads");
            process::exit(-1);
        }
    };
    let a:f64 = 0.5;
    let b:f64 = 0.75;
    let c:f64 = 0.25;
    let start = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_micros();
    // one join handle per worker; size known up front
    let mut handles = Vec::with_capacity(num_threads as usize);
    for t in 0..num_threads {
        handles.push(
            thread::spawn(move || {
                // thread t owns the closed index range [1 + NPTS*t, NPTS*(t+1)]
                let lo:u64 = 1+NPTS*t;
                let hi:u64 = NPTS*(t+1);
                let mut partial_pi:f64 = 0.0;
                for i in lo..=hi {
                    partial_pi += a/(((i as f64)-b)*((i as f64)-c));
                }
                partial_pi
            }));
    }
    let mut pi:f64 = 0.0;
    for handle in handles {
        // propagate a worker panic rather than silently dropping its sum
        pi += handle.join().unwrap();
    }
    let end = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_micros();
    // elapsed seconds; dt is already f64, so no re-cast is needed below
    let dt = ((end-start) as f64)/1e6;
    let mflops = (num_threads as f64)*(NPTS as f64)*5.0/(dt*1e6);
    println!("NPTS = {NPTS}, threads = {num_threads}, pi = {pi}");
    println!("time = {dt:.3}, estimated MFlops = {mflops:.0}");
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment