@@ -175,7 +175,7 @@ fn c_int Ring.get_cqe_internal(Ring *ring, Cqe** cqe_ptr, u32 submit, u32 wait_n
 /*
  * Must be called after io_uring_for_each_cqe()
  */
-fn void Ring.cq_advance(Ring *ring, u32 nr) @(cname="io_uring_cq_advance")
+fn void Ring.cq_advance(Ring *ring, u32 nr) // @(cname="io_uring_cq_advance")
 {
     if (nr) {
         Cq* cq = &ring.cq;
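
cq_advance retires a batch of completions with a single head update, instead of one cqe_seen call per CQE. A minimal sketch of the intended pattern, assuming the port also carries liburing's io_uring_peek_batch_cqe() (spelled here as ring.peek_batch_cqe(), an assumption; process() and the array syntax are placeholders):

    Cqe*[8] cqes;                               // hypothetical fixed-size batch
    u32 got = ring.peek_batch_cqe(&cqes[0], 8); // assumed port of io_uring_peek_batch_cqe()
    for (u32 i = 0; i < got; i++) {
        process(cqes[i].get_data());            // process() is a placeholder handler
    }
    ring.cq_advance(got);                       // one head update retires the whole batch
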
@@ -194,26 +194,26 @@ fn void Ring.cq_advance(Ring *ring, u32 nr) @(cname="io_uring_cq_advance")
  * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
  * been processed by the application.
  */
-fn void Ring.cqe_seen(Ring* ring, Cqe* cqe) @(cname="io_uring_cqe_seen")
+fn void Ring.cqe_seen(Ring* ring, Cqe* cqe) // @(cname="io_uring_cqe_seen")
 {
     if (cqe) ring.cq_advance(1);
 }
 
 /*
  * Command prep helpers
  */
-fn void Sqe.set_data(Sqe* sqe, void* data) @(cname="io_uring_sqe_set_data")
+fn void Sqe.set_data(Sqe* sqe, void* data) // @(cname="io_uring_sqe_set_data")
 {
     sqe.user_data = data;
 }
 
-fn void *Cqe.get_data(const Cqe* cqe) @(cname="io_uring_sqe_get_data")
+fn void *Cqe.get_data(const Cqe* cqe) // @(cname="io_uring_sqe_get_data")
 {
     //return (void *) (uintptr_t) cqe->user_data
     return cqe.user_data;
 }
 
-fn void Sqe.set_flags(Sqe* sqe, u32 flags) @(cname="io_uring_sqe_set_flags")
+fn void Sqe.set_flags(Sqe* sqe, u32 flags) // @(cname="io_uring_sqe_set_flags")
 {
     sqe.flags = (u8)flags;
 }
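
set_data/get_data round-trip a user pointer through user_data, and cqe_seen retires a single completion once it has been handled. A hedged end-to-end sketch; ring.get_sqe() and ring.submit() are assumed ports of io_uring_get_sqe()/io_uring_submit(), and Request/new_request()/complete() are hypothetical:

    Request* req = new_request();        // hypothetical per-request state
    Sqe* sqe = ring.get_sqe();           // assumed port of io_uring_get_sqe()
    sqe.prep_read(fd, buf, nbytes, 0);
    sqe.set_data(req);                   // stash the pointer in sqe.user_data
    ring.submit();                       // assumed port of io_uring_submit()

    Cqe* cqe;
    if (ring.wait_cqe(&cqe) == 0) {
        Request* done = (Request*)cqe.get_data();  // same pointer comes back
        complete(done);                  // placeholder completion callback
        ring.cqe_seen(cqe);              // must follow peek/wait, per the comment above
    }
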
@@ -227,7 +227,7 @@ static inline void __io_uring_set_target_fixed_file(struct io_uring_sqe *sqe,
 }
 #endif
 
-fn void prep_rw(c_int op, Sqe* sqe, c_int fd, const void* addr, u32 len, u64 offset) @(cname="io_uring_prep_rw")
+fn void prep_rw(c_int op, Sqe* sqe, c_int fd, const void* addr, u32 len, u64 offset) // @(cname="io_uring_prep_rw")
 {
     sqe.opcode = (u8)op;
     sqe.flags = 0;
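
Every prep_* helper below bottoms out in this routine. For orientation, the argument-to-field mapping, where the assignments beyond this hunk are assumed to follow upstream liburing's io_uring_prep_rw():

    // op     -> sqe.opcode   operation code (Op.READV, Op.ACCEPT, ...)
    // fd     -> sqe.fd       target descriptor, or -1 for ops without one (e.g. NOP)
    // addr   -> sqe.addr     buffer/iovec pointer, or a scalar smuggled as a pointer
    // len    -> sqe.len      byte count or iovec count
    // offset -> sqe.off      file offset, or a second smuggled scalar (see timeout_update)
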
@@ -289,23 +289,23 @@ static inline void io_uring_prep_tee(struct io_uring_sqe *sqe,
 }
 #endif
 
-fn void Sqe.prep_readv(Sqe* sqe, c_int fd, const uio.Iovec* iovecs, u32 nr_vecs, u64 offset) @(cname="io_uring_prep_readv")
+fn void Sqe.prep_readv(Sqe* sqe, c_int fd, const uio.Iovec* iovecs, u32 nr_vecs, u64 offset) // @(cname="io_uring_prep_readv")
 {
     prep_rw(Op.READV, sqe, fd, iovecs, nr_vecs, offset);
 }
 
-fn void Sqe.prep_read_fixed(Sqe* sqe, c_int fd, void* buf, u32 nbytes, u64 offset, c_int buf_index) @(cname="io_uring_prep_read_fixed")
+fn void Sqe.prep_read_fixed(Sqe* sqe, c_int fd, void* buf, u32 nbytes, u64 offset, c_int buf_index) // @(cname="io_uring_prep_read_fixed")
 {
     prep_rw(Op.READ_FIXED, sqe, fd, buf, nbytes, offset);
     sqe.buf_index = (u16)buf_index;
 }
 
-fn void Sqe.prep_writev(Sqe* sqe, c_int fd, const uio.Iovec* iovecs, u32 nr_vecs, u64 offset) @(cname="io_uring_prep_writev")
+fn void Sqe.prep_writev(Sqe* sqe, c_int fd, const uio.Iovec* iovecs, u32 nr_vecs, u64 offset) // @(cname="io_uring_prep_writev")
 {
     prep_rw(Op.WRITEV, sqe, fd, iovecs, nr_vecs, offset);
 }
 
-fn void Sqe.prep_write_fixed(Sqe* sqe, c_int fd, const void* buf, u32 nbytes, u64 offset, c_int buf_index) @(cname="io_uring_prep_write_fixed")
+fn void Sqe.prep_write_fixed(Sqe* sqe, c_int fd, const void* buf, u32 nbytes, u64 offset, c_int buf_index) // @(cname="io_uring_prep_write_fixed")
 {
     prep_rw(Op.WRITE_FIXED, sqe, fd, buf, nbytes, offset);
     sqe.buf_index = (u16)buf_index;
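
The *_fixed variants differ from readv/writev only in buf_index, which selects a buffer registered with the kernel ahead of time. A hedged sketch; ring.register_buffers() is an assumed port of io_uring_register_buffers(), and the Iovec field names are taken from <sys/uio.h>:

    uio.Iovec iov = { .iov_base = buf, .iov_len = nbytes };
    ring.register_buffers(&iov, 1);             // assumption: mirrors io_uring_register_buffers()

    Sqe* sqe = ring.get_sqe();                  // assumed helper, as above
    sqe.prep_read_fixed(fd, buf, nbytes, 0, 0); // final 0: index into the registered table
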
@@ -372,26 +372,26 @@ static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
 }
 #endif
 
-fn void Sqe.prep_nop(Sqe* sqe) @(cname="io_uring_prep_nop")
+fn void Sqe.prep_nop(Sqe* sqe) // @(cname="io_uring_prep_nop")
 {
     prep_rw(Op.NOP, sqe, -1, nil, 0, 0);
 }
 
 fn void Sqe.prep_timeout(Sqe* sqe, linux_types.Timespec* ts,
-                         u32 count, u32 flags) @(cname="io_uring_prep_timeout")
+                         u32 count, u32 flags) // @(cname="io_uring_prep_timeout")
 {
     prep_rw(Op.TIMEOUT, sqe, -1, ts, 1, count);
     sqe.timeout_flags = flags;
 }
 
-fn void Sqe.prep_timeout_remove(Sqe* sqe, u64 user_data, u32 flags) @(cname="io_uring_prep_timeout_remove")
+fn void Sqe.prep_timeout_remove(Sqe* sqe, u64 user_data, u32 flags) // @(cname="io_uring_prep_timeout_remove")
 {
     //prep_rw(Op.TIMEOUT_REMOVE, sqe, -1, (void *)(unsigned long)user_data, 0, 0);
     prep_rw(Op.TIMEOUT_REMOVE, sqe, -1, user_data, 0, 0);
     sqe.timeout_flags = flags;
 }
 
-fn void Sqe.prep_timeout_update(Sqe* sqe, linux_types.Timespec* ts, u64 user_data, u32 flags) @(cname="io_uring_prep_timeout_update")
+fn void Sqe.prep_timeout_update(Sqe* sqe, linux_types.Timespec* ts, u64 user_data, u32 flags) // @(cname="io_uring_prep_timeout_update")
 {
     prep_rw(Op.TIMEOUT_REMOVE, sqe, -1, (void*)user_data, 0, (usize)ts);
     sqe.timeout_flags = flags | TIMEOUT_UPDATE;
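
Note how the timeout family reuses prep_rw's scalar slots: count rides in offset, timeout_remove smuggles user_data through addr, and timeout_update smuggles the new Timespec through offset. A hedged sketch of arming a standalone one-second timeout (Timespec field names per struct __kernel_timespec; ring.get_sqe()/ring.submit() assumed as before):

    linux_types.Timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
    Sqe* sqe = ring.get_sqe();             // assumed helper
    sqe.prep_timeout(&ts, 0, 0);           // count 0: fire only when ts elapses
    ring.submit();
    // the completion carries res == -ETIME when the timer expires
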
@@ -400,7 +400,7 @@ fn void Sqe.prep_timeout_update(Sqe* sqe, linux_types.Timespec* ts, u64 user_dat
 fn void Sqe.prep_accept(Sqe* sqe, c_int fd,
                         sys_socket.Sockaddr* addr,
                         u32* addrlen,
-                        c_int flags) @(cname="io_uring_prep_accept")
+                        c_int flags) // @(cname="io_uring_prep_accept")
 {
     prep_rw(Op.ACCEPT, sqe, fd, addr, 0, (u64)addrlen);
     sqe.accept_flags = (u32)flags;
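
addrlen travels through the offset slot as a pointer, matching upstream liburing, so the kernel can update it in place on completion. A hedged usage sketch; ADDR_STORAGE_SIZE is a hypothetical constant for the byte size of addr:

    sys_socket.Sockaddr addr;
    u32 addrlen = ADDR_STORAGE_SIZE;       // hypothetical: byte size of addr
    Sqe* sqe = ring.get_sqe();             // assumed helper
    sqe.prep_accept(listen_fd, &addr, &addrlen, 0);
    ring.submit();
    // on completion, cqe.res is the accepted socket's fd (or -errno)
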
@@ -418,14 +418,14 @@ static inline void io_uring_prep_accept_direct(struct io_uring_sqe *sqe, int fd,
 }
 #endif
 
-fn void Sqe.prep_cancel(Sqe* sqe, void* user_data, c_int flags) @(cname="io_uring_prep_cancel")
+fn void Sqe.prep_cancel(Sqe* sqe, void* user_data, c_int flags) // @(cname="io_uring_prep_cancel")
 {
     prep_rw(Op.ASYNC_CANCEL, sqe, -1, user_data, 0, 0);
     sqe.cancel_flags = (u32)flags;
 }
 
 fn void Sqe.prep_link_timeout(Sqe* sqe, linux_types.Timespec* ts,
-                              u32 flags) @(cname="io_uring_prep_link_timeout")
+                              u32 flags) // @(cname="io_uring_prep_link_timeout")
 {
     prep_rw(Op.LINK_TIMEOUT, sqe, -1, ts, 1, 0);
     sqe.timeout_flags = flags;
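
A link timeout bounds the SQE submitted immediately before it, provided that SQE carries the link flag. A hedged sketch; LINK stands in for this port's spelling of IOSQE_IO_LINK, and get_sqe()/submit() are assumed as above:

    Sqe* op = ring.get_sqe();              // assumed helper
    op.prep_read(fd, buf, nbytes, 0);
    op.set_flags(LINK);                    // assumption: port's IOSQE_IO_LINK

    linux_types.Timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
    Sqe* lt = ring.get_sqe();
    lt.prep_link_timeout(&ts, 0);          // cancels the linked read if 5s pass first
    ring.submit();
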
@@ -475,17 +475,17 @@ static inline void io_uring_prep_openat_direct(struct io_uring_sqe *sqe,
 
 #endif
 
-fn void Sqe.prep_close(Sqe* sqe, c_int fd) @(cname="io_uring_prep_close")
+fn void Sqe.prep_close(Sqe* sqe, c_int fd) // @(cname="io_uring_prep_close")
 {
     prep_rw(Op.CLOSE, sqe, fd, nil, 0, 0);
 }
 
-fn void Sqe.prep_read(Sqe* sqe, c_int fd, void* buf, u32 nbytes, u64 offset) @(cname="io_uring_prep_read")
+fn void Sqe.prep_read(Sqe* sqe, c_int fd, void* buf, u32 nbytes, u64 offset) // @(cname="io_uring_prep_read")
 {
     prep_rw(Op.READ, sqe, fd, buf, nbytes, offset);
 }
 
-fn void Sqe.prep_write(Sqe* sqe, c_int fd, const void* buf, u32 nbytes, u64 offset) @(cname="io_uring_prep_write")
+fn void Sqe.prep_write(Sqe* sqe, c_int fd, const void* buf, u32 nbytes, u64 offset) // @(cname="io_uring_prep_write")
 {
     prep_rw(Op.WRITE, sqe, fd, buf, nbytes, offset);
 }
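
prep_read/prep_write are the single-buffer counterparts of readv/writev (in upstream liburing, IORING_OP_READ/WRITE require kernel 5.6+). A minimal round trip under the same assumed helpers:

    Sqe* sqe = ring.get_sqe();             // assumed helper
    sqe.prep_read(fd, buf, nbytes, 0);     // read nbytes from offset 0
    ring.submit();                         // assumed helper

    Cqe* cqe;
    if (ring.wait_cqe(&cqe) == 0) {
        // cqe.res: bytes transferred on success, -errno on failure
        ring.cqe_seen(cqe);
    }
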
@@ -720,7 +720,7 @@ static inline int io_uring_cq_eventfd_toggle(struct io_uring *ring,
  * readily available. Returns 0 with cqe_ptr filled in on success, -errno on
  * failure.
  */
-fn c_int Ring.wait_cqe_nr(Ring *ring, Cqe** cqe_ptr, u32 wait_nr) @(cname="io_uring_wait_cqe_nr")
+fn c_int Ring.wait_cqe_nr(Ring *ring, Cqe** cqe_ptr, u32 wait_nr) // @(cname="io_uring_wait_cqe_nr")
 {
     return Ring.get_cqe_internal(ring, cqe_ptr, 0, wait_nr, nil);
 }
@@ -729,7 +729,7 @@ fn c_int Ring.wait_cqe_nr(Ring *ring, Cqe** cqe_ptr, u32 wait_nr) @(cname="io_ur
  * Return an IO completion, if one is readily available. Returns 0 with
  * cqe_ptr filled in on success, -errno on failure.
  */
-fn c_int Ring.peek_cqe(Ring* ring, Cqe** cqe_ptr) @(cname="io_uring_peek_cqe")
+fn c_int Ring.peek_cqe(Ring* ring, Cqe** cqe_ptr) // @(cname="io_uring_peek_cqe")
 {
     return ring.wait_cqe_nr(cqe_ptr, 0);
 }
@@ -738,7 +738,7 @@ fn c_int Ring.peek_cqe(Ring* ring, Cqe** cqe_ptr) @(cname="io_uring_peek_cqe")
  * Return an IO completion, waiting for it if necessary. Returns 0 with
  * cqe_ptr filled in on success, -errno on failure.
  */
-fn c_int Ring.wait_cqe(Ring* ring, Cqe **cqe_ptr) @(cname="io_uring_wait_cqe")
+fn c_int Ring.wait_cqe(Ring* ring, Cqe **cqe_ptr) // @(cname="io_uring_wait_cqe")
 {
     return ring.wait_cqe_nr(cqe_ptr, 1);
 }
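
The three getters above differ only in the wait_nr they forward to get_cqe_internal; side by side (illustrative, not meant to run in sequence):

    Cqe* cqe;
    ring.peek_cqe(&cqe);                   // wait_nr 0: never blocks, -EAGAIN if empty
    ring.wait_cqe(&cqe);                   // wait_nr 1: blocks for a single completion
    ring.wait_cqe_nr(&cqe, 8);             // blocks until 8 completions are available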