How to use D without the GC?
evilrat
evilrat666 at gmail.com
Wed Jun 12 18:58:49 UTC 2024
On Wednesday, 12 June 2024 at 17:00:14 UTC, Vinod K Chandran wrote:
> On Wednesday, 12 June 2024 at 10:16:26 UTC, Sergey wrote:
>>
>> Btw are you going to use PyD or do everything manually from
>> scratch?
>>
> Is PyD still active? I haven't tested it. My approach is using the
> "ctypes" library with my DLL. Ctypes is the fastest FFI in my
> experience. I tested Cython, Pybind11 and CFFI, but none can beat
> the speed of ctypes. Currently the fastest experiments were the
> DLLs created in Odin & C3. Both are non-GC languages.
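Since all ctypes needs is a plain C ABI, that route pairs naturally with a @nogc D shared library. Roughly something like this on the D side (a minimal, untested sketch; module and function names are made up):

```d
// Hypothetical D side of the ctypes approach: a shared library exposing a
// plain C ABI, written @nogc so the GC never runs. Names are made up.
module imgproc;

version (Windows)
{
    import core.sys.windows.dll;
    mixin SimpleDllMain;   // standard DLL entry-point boilerplate
}

// clamp-add `amount` to every 8-bit sample, in place
extern (C) export @nogc nothrow
void brighten(ubyte* pixels, size_t len, ubyte amount)
{
    foreach (i; 0 .. len)
    {
        const v = pixels[i] + amount;
        pixels[i] = cast(ubyte)(v > 255 ? 255 : v);
    }
}
```

On the Python side that is just a ctypes.CDLL() load plus an argtypes declaration, no binding generator involved.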
As for PyD: it is probably not that well maintained, but it definitely works
with Python 3.10 and maybe even 3.11. I use it to interface with PyTorch,
NumPy and PIL, but my use case is pretty simple: I just write some wrapper
Python functions to run inference and pass images back and forth using
embedded py_stmts. The only problem is that it seems to leak a lot of
PydObjects, so I have to manually free them; even scope doesn't help with
that, which is sad.
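For anyone who hasn't used PyD's embedding side before, the basic setup the code below assumes looks roughly like this (a minimal sketch based on pyd.embedded; not checked against the latest release):

```d
// Minimal PyD embedding sketch: start the interpreter, run statements,
// and move values between D and Python through an InterpContext.
import std.stdio;
import pyd.pyd, pyd.embedded;

shared static this()
{
    py_init();   // bring up the embedded CPython interpreter once per process
}

void main()
{
    // run statements in __main__ and evaluate an expression back into D
    py_stmts("import math\nx = math.factorial(5)");
    writeln(py_eval!int("x"));   // 120

    // InterpContext gives a separate namespace for passing values both ways
    auto ctx = new InterpContext();
    ctx.name = py("PyD");
    ctx.py_stmts("msg = 'hello from ' + name");
    writeln(ctx.msg.to_d!string);
}
```

The InterpContext namespace trick is what the examples below rely on for moving the image path and the result array between the two sides.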
Example classifier in Python:
```python
def inference(image: Image):
    """Predicts the image class and returns confidences for every class.

    To get the class one can use the following code:
    > conf = inference(image)
    > index = conf.argmax()
    > cls = classes[index]
    """
    # this detector doesn't work with more than 3 channels
    ch = len(image.getbands())
    has_transparency = image.info.get('transparency', None) is not None
    if ch > 3 or has_transparency:
        image = image.convert("RGB")
    image_tensor = prep_transform(image).float()
    image_tensor = image_tensor.unsqueeze_(0)
    # it is fast enough to run on CPU
    # if torch.cuda.is_available():
    #     image_tensor.cuda()
    with torch.inference_mode():
        # NOTE: read the comment on model
        output = model(image_tensor)
    index = output.data.numpy()
    return index
```
And one of the D functions:
```d
ImageData aiGoesBrrrr(string path, int strength = 50) {
    try {
        if (!pymod)
            py_stmts("import sys; sys.path.append('modules/xyz')");
        initOnce!pymod(py_import("xyz.inference"));
        if (!pymod.hasattr("model"))
            pymod.model = pymod.method("load_model",
                "modules/xyz/pre_trained/weights.pth");

        PydObject ipath = py(path);
        scope(exit) destroy(ipath);

        auto context = new InterpContext();
        context.path = ipath;
        context.py_stmts("
from PIL import Image
image = Image.open(path)
ch = len(image.getbands())
if ch > 3:
    image = image.convert('RGB')
");

        // signature: def run(model, imagepath, alpha=45) -> numpy.Array
        PydObject output = pymod.method("run", pymod.model,
            context.image, 100 - strength);
        context.output = output;
        scope(exit) destroy(output);

        PydObject shape = output.getattr("shape");
        scope(exit) destroy(shape);
        // int n = ...;
        int c = shape[2].to_d!int;
        int w = shape[1].to_d!int;
        int h = shape[0].to_d!int;

        // raw view into the numpy array, copied out with .dup below
        void* raw_ptr = output.buffer_view().item_ptr([0, 0, 0]);
        ubyte* d_ptr = cast(ubyte*) raw_ptr;
        ubyte[] d_img = d_ptr[0 .. h * w * c];
        return ImageData(d_img.dup, h, w, c);
    } catch (PythonException e) {
        // oh no...
        auto context = new InterpContext();
        context.trace = new PydObject(e.traceback);
        context.py_stmts("from traceback import format_tb; trace = format_tb(trace)");
        printerr(e.py_message, "\n", context.trace.to_d!string);
    }
    return ImageData.init;
}
```
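One untested idea for the leaks: wrap PydObject in a tiny RAII struct whose destructor calls destroy, so the Python reference is dropped deterministically when it goes out of scope. `PyRef` below is a made-up name, not anything from PyD, and the sketch hasn't been tried:

```d
// Untested idea: RAII wrapper that calls destroy() on the wrapped PydObject
// when it leaves scope. PyRef is a made-up name, not part of PyD.
import pyd.pyd;

struct PyRef
{
    PydObject obj;
    alias obj this;        // forwards indexing, getattr, to_d, etc.

    @disable this(this);   // no copies, so destroy runs exactly once

    ~this()
    {
        if (obj !is null)
            destroy(obj);  // drop the Python reference deterministically
    }
}

// usage sketch:
//   auto shape = PyRef(output.getattr("shape"));
//   int w = shape[1].to_d!int;   // released at the end of the scope
```

No idea if it helps in practice, but at least it would remove the need to sprinkle scope(exit) destroy(...) everywhere.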