Skip to content

Commit

Permalink
chore: still freeing somewhere
Browse files Browse the repository at this point in the history
  • Loading branch information
FL33TW00D committed Jan 21, 2024
1 parent 1859f9e commit 21cbe5b
Show file tree
Hide file tree
Showing 3 changed files with 40 additions and 36 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/rust.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,6 @@ jobs:
- name: Build
run: cargo build
- name: Run tests
run: cargo test
run: cargo test -- --nocapture
- name: Run integration tests
run: (cd crates/ratchet-integration-tests;sh run-tests.sh)
2 changes: 2 additions & 0 deletions crates/ratchet-core/src/storage/cpu_buffer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,8 +94,10 @@ impl CPUBuffer {

/// Creates an independent copy of this buffer: allocates a fresh region with
/// the same `Layout` and byte-copies the contents into it.
///
/// # Panics
/// Aborts via `handle_alloc_error` if the global allocator fails.
pub fn deep_clone(&self) -> Self {
    let (ptr, layout) = self.inner().into_raw_parts();
    // SAFETY: `layout` describes the existing live allocation, so it is a
    // valid layout to request from the global allocator.
    let alloc = unsafe { std::alloc::alloc(layout) };
    // `alloc` returns null on failure; copying through it would be UB.
    if alloc.is_null() {
        std::alloc::handle_alloc_error(layout);
    }
    // SAFETY: `ptr` is valid for `layout.size()` reads (it backs this buffer),
    // `alloc` is valid for the same number of writes, and the two regions are
    // distinct allocations, so they cannot overlap.
    unsafe { ptr.copy_to_nonoverlapping(alloc, layout.size()) };

    Self::from_raw_parts(alloc, layout)
}
Expand Down
72 changes: 37 additions & 35 deletions crates/ratchet-core/src/tensor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -340,6 +340,7 @@ impl Tensor {
let storage_guard = self.storage();
let buffer = storage_guard.as_ref().unwrap().try_cpu().unwrap();
let (ptr, _) = buffer.inner().into_raw_parts();
println!("INTO NDARRAY: {:?}", ptr);
unsafe { ArrayViewD::from_shape_ptr(shape, ptr as *const T).to_owned() }
} else {
ArrayViewD::from_shape(shape, &[]).unwrap().to_owned()
Expand Down Expand Up @@ -426,45 +427,46 @@ mod tests {
Ok(())
}

/// Integration test: computes a 1024x1024 matmul with PyTorch (via pyo3) as
/// ground truth, then runs the same matmul on the GPU device and prints both
/// results for comparison under `cargo test -- --nocapture`.
///
/// NOTE(review): this reconstructs the post-commit version of the test; the
/// diff rendering had interleaved the old commented-out copy with the new one.
#[test]
fn test_pyo3() -> anyhow::Result<()> {
    let cpu_device = Device::request_device(DeviceRequest::CPU)?;
    let a = Tensor::randn::<f32>(shape![1024, 1024], cpu_device.clone());
    let b = Tensor::randn::<f32>(shape![1024, 1024], cpu_device.clone());

    // Ground truth from PyTorch: round-trip the tensors through numpy,
    // multiply with torch, and convert back into a Tensor.
    let ground: anyhow::Result<Tensor> = Python::with_gil(|py| {
        let prg = PyModule::from_code(
            py,
            r#"
import torch
def matmul(a, b):
    return torch.matmul(torch.from_numpy(a), torch.from_numpy(b)).numpy()
"#,
            "x.py",
            "x",
        )?;

        let result = prg
            .getattr("matmul")?
            .call1((a.to_py::<f32>(&py), b.to_py::<f32>(&py)))?
            .extract::<&PyArrayDyn<f32>>()?;
        Ok(Tensor::from(result))
    });
    println!("\nTORCH: {:#?}", ground);

    println!("\nA: {:#?}", a);
    println!("\nB: {:#?}", b);

    // Our result: move inputs to the GPU, matmul, resolve, pull back to CPU.
    let gpu_device = Device::request_device(DeviceRequest::GPU)?;
    let a = a.to(gpu_device.clone())?;
    let b = b.to(gpu_device)?;

    let c = a.matmul(&b)?;
    c.resolve()?;

    let our_result = c.to(cpu_device)?;
    println!("\nOURS: {:#?}", our_result);

    Ok(())
}
}

0 comments on commit 21cbe5b

Please sign in to comment.