Diffstat (limited to 'lib/mlibc/sysdeps/managarm/generic')
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/drm.cpp        1176
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/ensure.cpp       38
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/entry.cpp       132
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/file.cpp       2526
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/fork-exec.cpp   744
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/ioctl.cpp       708
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/memory.cpp       30
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/mount.cpp        44
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/net.cpp          57
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/sched.cpp       102
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/signals.cpp     139
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/socket.cpp      423
-rw-r--r--  lib/mlibc/sysdeps/managarm/generic/time.cpp         81
13 files changed, 6200 insertions, 0 deletions
diff --git a/lib/mlibc/sysdeps/managarm/generic/drm.cpp b/lib/mlibc/sysdeps/managarm/generic/drm.cpp
new file mode 100644
index 0000000..805c366
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/drm.cpp
@@ -0,0 +1,1176 @@
+#include <drm/drm_fourcc.h>
+#include <drm/drm.h>
+
+#include <bits/ensure.h>
+#include <mlibc/all-sysdeps.hpp>
+#include <mlibc/allocator.hpp>
+#include <mlibc/debug.hpp>
+#include <mlibc/posix-pipe.hpp>
+
+#include <fs.frigg_bragi.hpp>
+
+namespace mlibc {
+
+int ioctl_drm(int fd, unsigned long request, void *arg, int *result, HelHandle handle) {
+ managarm::fs::IoctlRequest<MemoryAllocator> ioctl_req(getSysdepsAllocator());
+
+ switch(request) {
+ case DRM_IOCTL_VERSION: {
+ auto param = reinterpret_cast<drm_version*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->version_major = resp.drm_version_major();
+ param->version_minor = resp.drm_version_minor();
+ param->version_patchlevel = resp.drm_version_patchlevel();
+
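+		// As in libdrm on Linux, callers typically issue this ioctl twice: first
+		// with null buffers to query the string lengths written back below, then
+		// again with buffers large enough to receive the strings.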
+ if(param->name)
+ memcpy(param->name, resp.drm_driver_name().data(), frg::min(param->name_len,
+ resp.drm_driver_name().size()));
+ if(param->date)
+ memcpy(param->date, resp.drm_driver_date().data(), frg::min(param->date_len,
+ resp.drm_driver_date().size()));
+ if(param->desc)
+ memcpy(param->desc, resp.drm_driver_desc().data(), frg::min(param->desc_len,
+ resp.drm_driver_desc().size()));
+
+ param->name_len = resp.drm_driver_name().size();
+ param->date_len = resp.drm_driver_date().size();
+ param->desc_len = resp.drm_driver_desc().size();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_GET_CAP: {
+ auto param = reinterpret_cast<drm_get_cap*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_capability(param->capability);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->value = resp.drm_value();
+ *result = resp.result();
+ return 0;
+ }
+ }
+ case DRM_IOCTL_SET_CLIENT_CAP: {
+ auto param = reinterpret_cast<drm_set_client_cap *>(arg);
+ mlibc::infoLogger() << "\e[35mmlibc: DRM_IOCTL_SET_CLIENT_CAP(" << param->capability << ") ignores its value\e[39m" << frg::endlog;
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_capability(param->capability);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->value = resp.drm_value();
+ *result = resp.result();
+ return 0;
+ }
+ }
+ case DRM_IOCTL_GET_MAGIC: {
+ auto param = reinterpret_cast<drm_auth *>(arg);
+ mlibc::infoLogger() << "\e[31mmlibc: DRM_IOCTL_GET_MAGIC is not implemented correctly\e[39m"
+ << frg::endlog;
+ param->magic = 1;
+ *result = 0;
+ return 0;
+ }
+ case DRM_IOCTL_AUTH_MAGIC: {
+ mlibc::infoLogger() << "\e[31mmlibc: DRM_IOCTL_AUTH_MAGIC is not implemented correctly\e[39m"
+ << frg::endlog;
+ *result = 0;
+ return 0;
+ }
+ case DRM_IOCTL_SET_MASTER: {
+ mlibc::infoLogger() << "\e[31mmlibc: DRM_IOCTL_SET_MASTER is not implemented correctly\e[39m"
+ << frg::endlog;
+ *result = 0;
+ return 0;
+ }
+ case DRM_IOCTL_DROP_MASTER: {
+ mlibc::infoLogger() << "\e[31mmlibc: DRM_IOCTL_DROP_MASTER is not implemented correctly\e[39m"
+ << frg::endlog;
+ *result = 0;
+ return 0;
+ }
+ case DRM_IOCTL_MODE_GETRESOURCES: {
+ auto param = reinterpret_cast<drm_mode_card_res *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+		HEL_CHECK(offer.error());
+		HEL_CHECK(send_ioctl_req.error());
+		HEL_CHECK(send_req.error());
+
+		if(recv_resp.error() == kHelErrDismissed) {
+			return EINVAL;
+		}
+		HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ for(size_t i = 0; i < resp.drm_fb_ids_size(); i++) {
+ if(i >= param->count_fbs)
+ break;
+ auto dest = reinterpret_cast<uint32_t *>(param->fb_id_ptr);
+ dest[i] = resp.drm_fb_ids(i);
+ }
+ param->count_fbs = resp.drm_fb_ids_size();
+
+ for(size_t i = 0; i < resp.drm_crtc_ids_size(); i++) {
+ if(i >= param->count_crtcs)
+ break;
+ auto dest = reinterpret_cast<uint32_t *>(param->crtc_id_ptr);
+ dest[i] = resp.drm_crtc_ids(i);
+ }
+ param->count_crtcs = resp.drm_crtc_ids_size();
+
+ for(size_t i = 0; i < resp.drm_connector_ids_size(); i++) {
+ if(i >= param->count_connectors)
+ break;
+ auto dest = reinterpret_cast<uint32_t *>(param->connector_id_ptr);
+ dest[i] = resp.drm_connector_ids(i);
+ }
+ param->count_connectors = resp.drm_connector_ids_size();
+
+ for(size_t i = 0; i < resp.drm_encoder_ids_size(); i++) {
+ if(i >= param->count_encoders)
+ continue;
+ auto dest = reinterpret_cast<uint32_t *>(param->encoder_id_ptr);
+ dest[i] = resp.drm_encoder_ids(i);
+ }
+ param->count_encoders = resp.drm_encoder_ids_size();
+
+ param->min_width = resp.drm_min_width();
+ param->max_width = resp.drm_max_width();
+ param->min_height = resp.drm_min_height();
+ param->max_height = resp.drm_max_height();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_GETCONNECTOR: {
+ auto param = reinterpret_cast<drm_mode_get_connector*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_connector_id(param->connector_id);
+ req.set_drm_max_modes(param->count_modes);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp, recv_list] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(reinterpret_cast<void *>(param->modes_ptr), param->count_modes * sizeof(drm_mode_modeinfo))
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ if(recv_resp.error() == kHelErrDismissed)
+ return EINVAL;
+
+ HEL_CHECK(recv_resp.error());
+ HEL_CHECK(recv_list.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ for(size_t i = 0; i < resp.drm_encoders_size(); i++) {
+ if(i >= param->count_encoders)
+ continue;
+ auto dest = reinterpret_cast<uint32_t *>(param->encoders_ptr);
+ dest[i] = resp.drm_encoders(i);
+ }
+
+ param->encoder_id = resp.drm_encoder_id();
+ param->connector_type = resp.drm_connector_type();
+ param->connector_type_id = resp.drm_connector_type_id();
+ param->connection = resp.drm_connection();
+ param->mm_width = resp.drm_mm_width();
+ param->mm_height = resp.drm_mm_height();
+ param->subpixel = resp.drm_subpixel();
+ param->pad = 0;
+ param->count_encoders = resp.drm_encoders_size();
+ param->count_modes = resp.drm_num_modes();
+
+ if(param->props_ptr) {
+ auto id_ptr = reinterpret_cast<uint32_t *>(param->props_ptr);
+ auto val_ptr = reinterpret_cast<uint64_t *>(param->prop_values_ptr);
+
+ for(size_t i = 0; i < frg::min(static_cast<size_t>(param->count_props), resp.drm_obj_property_ids_size()); i++) {
+ id_ptr[i] = resp.drm_obj_property_ids(i);
+ val_ptr[i] = resp.drm_obj_property_values(i);
+ }
+ }
+
+ param->count_props = resp.drm_obj_property_ids_size();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_GETPROPERTY: {
+ auto param = reinterpret_cast<drm_mode_get_property*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_property_id(param->prop_id);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() != managarm::fs::Errors::SUCCESS) {
+ mlibc::infoLogger() << "\e[31mmlibc: DRM_IOCTL_MODE_GETPROPERTY(" << param->prop_id << ") error " << (int) resp.error() << "\e[39m"
+ << frg::endlog;
+ *result = 0;
+ return EINVAL;
+ }
+
+ memcpy(param->name, resp.drm_property_name().data(), resp.drm_property_name().size());
+ param->count_values = resp.drm_property_vals_size();
+ param->flags = resp.drm_property_flags();
+
+ for(size_t i = 0; i < param->count_values && i < resp.drm_property_vals_size() && param->values_ptr; i++) {
+ auto dest = reinterpret_cast<uint64_t *>(param->values_ptr);
+ dest[i] = resp.drm_property_vals(i);
+ }
+
+ __ensure(resp.drm_enum_name_size() == resp.drm_enum_value_size());
+
+ for(size_t i = 0; i < param->count_enum_blobs && i < resp.drm_enum_name_size() && i < resp.drm_enum_value_size(); i++) {
+ auto dest = reinterpret_cast<drm_mode_property_enum *>(param->enum_blob_ptr);
+ dest[i].value = resp.drm_enum_value(i);
+ strncpy(dest[i].name, resp.drm_enum_name(i).data(), DRM_PROP_NAME_LEN);
+ }
+
+ param->count_enum_blobs = resp.drm_enum_name_size();
+
+ *result = 0;
+ return 0;
+ }
+ case DRM_IOCTL_MODE_SETPROPERTY: {
+ auto param = reinterpret_cast<drm_mode_connector_set_property *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_property_id(param->prop_id);
+ req.set_drm_property_value(param->value);
+ req.set_drm_obj_id(param->connector_id);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() != managarm::fs::Errors::SUCCESS) {
+ mlibc::infoLogger() << "\e[31mmlibc: DRM_IOCTL_MODE_SETPROPERTY(" << param->prop_id << ") error " << (int) resp.error() << "\e[39m"
+ << frg::endlog;
+ *result = 0;
+ return EINVAL;
+ }
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_GETPROPBLOB: {
+ auto param = reinterpret_cast<drm_mode_get_blob *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_blob_id(param->blob_id);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() != managarm::fs::Errors::SUCCESS) {
+ mlibc::infoLogger() << "\e[31mmlibc: DRM_IOCTL_MODE_GETPROPBLOB(" << param->blob_id << ") error " << (int) resp.error() << "\e[39m"
+ << frg::endlog;
+ *result = 0;
+ return EINVAL;
+ }
+
+ uint8_t *dest = reinterpret_cast<uint8_t *>(param->data);
+ for(size_t i = 0; i < resp.drm_property_blob_size(); i++) {
+ if(i >= param->length) {
+ continue;
+ }
+
+ dest[i] = resp.drm_property_blob(i);
+ }
+
+ param->length = resp.drm_property_blob_size();
+
+ *result = 0;
+ return 0;
+ }
+ case DRM_IOCTL_MODE_GETPLANE: {
+ auto param = reinterpret_cast<drm_mode_get_plane*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_plane_id(param->plane_id);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->crtc_id = resp.drm_crtc_id();
+ param->fb_id = resp.drm_fb_id();
+ param->possible_crtcs = resp.drm_possible_crtcs();
+ param->gamma_size = resp.drm_gamma_size();
+
+ // FIXME: this should be passed as a buffer with helix, but this has no bounded max size?
+ for(size_t i = 0; i < resp.drm_format_type_size(); i++) {
+ if(i >= param->count_format_types) {
+ break;
+ }
+ auto dest = reinterpret_cast<uint32_t *>(param->format_type_ptr);
+ dest[i] = resp.drm_format_type(i);
+ }
+
+ param->count_format_types = resp.drm_format_type_size();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_GETPLANERESOURCES: {
+ auto param = reinterpret_cast<drm_mode_get_plane_res *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ // FIXME: send this via a helix_ng buffer
+ for(size_t i = 0; i < resp.drm_plane_res_size(); i++) {
+ if(i >= param->count_planes) {
+ continue;
+ }
+ auto dest = reinterpret_cast<uint32_t *>(param->plane_id_ptr);
+ dest[i] = resp.drm_plane_res(i);
+ }
+
+ param->count_planes = resp.drm_plane_res_size();
+
+ *result = resp.result();
+
+ return 0;
+ }
+ case DRM_IOCTL_MODE_GETENCODER: {
+ auto param = reinterpret_cast<drm_mode_get_encoder*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_encoder_id(param->encoder_id);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->encoder_type = resp.drm_encoder_type();
+ param->crtc_id = resp.drm_crtc_id();
+ param->possible_crtcs = resp.drm_possible_crtcs();
+ param->possible_clones = resp.drm_possible_clones();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_CREATE_DUMB: {
+ auto param = reinterpret_cast<drm_mode_create_dumb*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ req.set_drm_width(param->width);
+ req.set_drm_height(param->height);
+ req.set_drm_bpp(param->bpp);
+ req.set_drm_flags(param->flags);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->handle = resp.drm_handle();
+ param->pitch = resp.drm_pitch();
+ param->size = resp.drm_size();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_ADDFB: {
+ auto param = reinterpret_cast<drm_mode_fb_cmd *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ req.set_drm_width(param->width);
+ req.set_drm_height(param->height);
+ req.set_drm_pitch(param->pitch);
+ req.set_drm_bpp(param->bpp);
+ req.set_drm_depth(param->depth);
+ req.set_drm_handle(param->handle);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->fb_id = resp.drm_fb_id();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_GETFB2: {
+ auto param = reinterpret_cast<drm_mode_fb_cmd2 *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(DRM_IOCTL_MODE_GETFB2);
+ req.set_drm_fb_id(param->fb_id);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->width = resp.drm_width();
+ param->height = resp.drm_height();
+ param->pixel_format = resp.pixel_format();
+ param->modifier[0] = resp.modifier();
+ memcpy(param->handles, resp.drm_handles().data(), sizeof(uint32_t) * resp.drm_handles_size());
+ memcpy(param->pitches, resp.drm_pitches().data(), sizeof(uint32_t) * resp.drm_pitches_size());
+ memcpy(param->offsets, resp.drm_offsets().data(), sizeof(uint32_t) * resp.drm_offsets_size());
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_ADDFB2: {
+ auto param = reinterpret_cast<drm_mode_fb_cmd2 *>(arg);
+
+ __ensure(!param->flags || param->flags == DRM_MODE_FB_MODIFIERS);
+ __ensure(!param->modifier[0] || param->modifier[0] == DRM_FORMAT_MOD_INVALID);
+ __ensure(!param->offsets[0]);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(DRM_IOCTL_MODE_ADDFB2);
+
+ req.set_drm_width(param->width);
+ req.set_drm_height(param->height);
+ req.set_drm_pitch(param->pitches[0]);
+ req.set_drm_fourcc(param->pixel_format);
+ req.set_drm_handle(param->handles[0]);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->fb_id = resp.drm_fb_id();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_RMFB: {
+ auto param = reinterpret_cast<int *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ req.set_drm_fb_id(*param);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_MAP_DUMB: {
+ auto param = reinterpret_cast<drm_mode_map_dumb*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ req.set_drm_handle(param->handle);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->offset = resp.drm_offset();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_GETCRTC: {
+ auto param = reinterpret_cast<drm_mode_crtc*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_crtc_id(param->crtc_id);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp, recv_data] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(&param->mode, sizeof(drm_mode_modeinfo)))
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+ HEL_CHECK(recv_data.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->fb_id = resp.drm_fb_id();
+ param->x = resp.drm_x();
+ param->y = resp.drm_y();
+ param->gamma_size = resp.drm_gamma_size();
+ param->mode_valid = resp.drm_mode_valid();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_SETCRTC: {
+ auto param = reinterpret_cast<drm_mode_crtc*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ for(size_t i = 0; i < param->count_connectors; i++) {
+ auto dest = reinterpret_cast<uint32_t *>(param->set_connectors_ptr);
+ req.add_drm_connector_ids(dest[i]);
+ }
+ req.set_drm_x(param->x);
+ req.set_drm_y(param->y);
+ req.set_drm_crtc_id(param->crtc_id);
+ req.set_drm_fb_id(param->fb_id);
+ req.set_drm_mode_valid(param->mode_valid);
+
+ auto [offer, send_ioctl_req, send_req, send_mode, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::sendBuffer(&param->mode, sizeof(drm_mode_modeinfo)),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(send_mode.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_OBJ_GETPROPERTIES: {
+ auto param = reinterpret_cast<drm_mode_obj_get_properties *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ req.set_drm_count_props(param->count_props);
+ req.set_drm_obj_id(param->obj_id);
+ req.set_drm_obj_type(param->obj_type);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ auto props = reinterpret_cast<uint32_t *>(param->props_ptr);
+ auto prop_vals = reinterpret_cast<uint64_t *>(param->prop_values_ptr);
+
+ for(size_t i = 0; i < resp.drm_obj_property_ids_size(); i++) {
+ if(i >= param->count_props) {
+ break;
+ }
+ props[i] = resp.drm_obj_property_ids(i);
+ prop_vals[i] = resp.drm_obj_property_values(i);
+ }
+
+ param->count_props = resp.drm_obj_property_ids_size();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_PAGE_FLIP: {
+ auto param = reinterpret_cast<drm_mode_crtc_page_flip *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ __ensure(!(param->flags & ~DRM_MODE_PAGE_FLIP_EVENT));
+ req.set_drm_crtc_id(param->crtc_id);
+ req.set_drm_fb_id(param->fb_id);
+ req.set_drm_cookie(param->user_data);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_DIRTYFB: {
+ auto param = reinterpret_cast<drm_mode_fb_dirty_cmd*>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ req.set_drm_fb_id(param->fb_id);
+ req.set_drm_flags(param->flags);
+ req.set_drm_color(param->color);
+ for(size_t i = 0; i < param->num_clips; i++) {
+ auto dest = reinterpret_cast<drm_clip_rect *>(param->clips_ptr);
+ managarm::fs::Rect<MemoryAllocator> clip(getSysdepsAllocator());
+ clip.set_x1(dest->x1);
+ clip.set_y1(dest->y1);
+ clip.set_x2(dest->x2);
+ clip.set_y2(dest->y2);
+ req.add_drm_clips(std::move(clip));
+ }
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = resp.result();
+ return 0;
+ }
+ }
+ case DRM_IOCTL_MODE_CURSOR: {
+ auto param = reinterpret_cast<drm_mode_cursor *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ req.set_drm_flags(param->flags);
+ req.set_drm_crtc_id(param->crtc_id);
+
+ if (param->flags == DRM_MODE_CURSOR_MOVE) {
+ req.set_drm_x(param->x);
+ req.set_drm_y(param->y);
+ } else if (param->flags == DRM_MODE_CURSOR_BO) {
+ req.set_drm_width(param->width);
+ req.set_drm_height(param->height);
+ req.set_drm_handle(param->handle);
+ } else {
+ mlibc::infoLogger() << "\e[35mmlibc: invalid flags in DRM_IOCTL_MODE_CURSOR\e[39m" << frg::endlog;
+ return EINVAL;
+ }
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if (resp.error() == managarm::fs::Errors::NO_BACKING_DEVICE) {
+ return ENXIO;
+ }else if (resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else{
+ *result = resp.result();
+ return 0;
+ }
+ }
+ case DRM_IOCTL_MODE_DESTROY_DUMB: {
+ auto param = reinterpret_cast<drm_mode_destroy_dumb *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ req.set_drm_handle(param->handle);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_CREATEPROPBLOB: {
+ auto param = reinterpret_cast<drm_mode_create_blob *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_blob_size(param->length);
+
+ auto [offer, send_ioctl_req, send_req, blob_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::sendBuffer(reinterpret_cast<void *>(param->data), param->length),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(blob_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->blob_id = resp.drm_blob_id();
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_DESTROYPROPBLOB: {
+ auto param = reinterpret_cast<drm_mode_destroy_blob *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_blob_id(param->blob_id);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_ATOMIC: {
+ auto param = reinterpret_cast<drm_mode_atomic *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_flags(param->flags);
+ req.set_drm_cookie(param->user_data);
+
+ size_t prop_count = 0;
+ auto objs_ptr = reinterpret_cast<uint32_t *>(param->objs_ptr);
+ auto count_props_ptr = reinterpret_cast<uint32_t *>(param->count_props_ptr);
+ auto props_ptr = reinterpret_cast<uint32_t *>(param->props_ptr);
+ auto prop_values_ptr = reinterpret_cast<uint64_t *>(param->prop_values_ptr);
+
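+		// The atomic request flattens all properties into two parallel arrays:
+		// count_props_ptr[i] tells how many consecutive entries of props_ptr and
+		// prop_values_ptr belong to the object objs_ptr[i].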
+ for(size_t i = 0; i < param->count_objs; i++) {
+ /* list of modeobjs and their property count */
+ req.add_drm_obj_ids(objs_ptr[i]);
+ req.add_drm_prop_counts(count_props_ptr[i]);
+ prop_count += count_props_ptr[i];
+ }
+
+ for(size_t i = 0; i < prop_count; i++) {
+ /* array of property IDs */
+ req.add_drm_props(props_ptr[i]);
+ /* array of property values */
+ req.add_drm_prop_values(prop_values_ptr[i]);
+ }
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_MODE_LIST_LESSEES: {
+ mlibc::infoLogger() << "\e[35mmlibc: DRM_IOCTL_MODE_LIST_LESSEES"
+ " is not implemented correctly\e[39m" << frg::endlog;
+ return EINVAL;
+ }
+ case DRM_IOCTL_MODE_SETGAMMA: {
+ mlibc::infoLogger() << "\e[35mmlibc: DRM_IOCTL_MODE_SETGAMMA"
+ " is not implemented correctly\e[39m" << frg::endlog;
+ return 0;
+ }
+ case DRM_IOCTL_MODE_CREATE_LEASE: {
+ auto param = reinterpret_cast<drm_mode_create_lease *>(arg);
+
+ mlibc::infoLogger() << "\e[35mmlibc: DRM_IOCTL_MODE_CREATE_LEASE"
+ " is a noop\e[39m" << frg::endlog;
+ param->lessee_id = 1;
+ param->fd = fd;
+ *result = 0;
+ return 0;
+ }
+ case DRM_IOCTL_GEM_CLOSE: {
+ mlibc::infoLogger() << "\e[35mmlibc: DRM_IOCTL_GEM_CLOSE"
+ " is a noop\e[39m" << frg::endlog;
+ return 0;
+ }
+ case DRM_IOCTL_PRIME_HANDLE_TO_FD: {
+ auto param = reinterpret_cast<drm_prime_handle *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_prime_handle(param->handle);
+ req.set_drm_flags(param->flags);
+
+ auto [offer, send_ioctl_req, send_req, send_creds, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::imbueCredentials(),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(send_creds.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->fd = resp.drm_prime_fd();
+ *result = resp.result();
+ return 0;
+ }
+ case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
+ auto param = reinterpret_cast<drm_prime_handle *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_drm_flags(param->flags);
+
+ auto [offer, send_ioctl_req, send_req, send_creds, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::imbueCredentials(getHandleForFd(param->fd)),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(send_creds.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::FILE_NOT_FOUND) {
+ return EBADF;
+ } else {
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ }
+
+ param->handle = resp.drm_prime_handle();
+ *result = resp.result();
+ return 0;
+ }
+ }
+
+	mlibc::infoLogger() << "mlibc: Unexpected DRM ioctl"
+		<< ", number: 0x" << frg::hex_fmt(_IOC_NR(request))
+ << " (raw request: " << frg::hex_fmt(request) << ")" << frg::endlog;
+ __ensure(!"Illegal ioctl request");
+ __builtin_unreachable();
+}
+
+} //namespace mlibc
diff --git a/lib/mlibc/sysdeps/managarm/generic/ensure.cpp b/lib/mlibc/sysdeps/managarm/generic/ensure.cpp
new file mode 100644
index 0000000..ab0d84f
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/ensure.cpp
@@ -0,0 +1,38 @@
+
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+
+#include <bits/ensure.h>
+#include <mlibc/debug.hpp>
+#include <mlibc/all-sysdeps.hpp>
+
+#include <hel.h>
+#include <hel-syscalls.h>
+
+void __frigg_assert_fail(const char *assertion, const char *file, unsigned int line,
+ const char *function) {
+ mlibc::panicLogger() << "In function " << function
+ << ", file " << file << ":" << line << "\n"
+ << "__ensure(" << assertion << ") failed" << frg::endlog;
+}
+
+namespace mlibc {
+ void sys_libc_log(const char *message) {
+ // This implementation is inherently signal-safe.
+ size_t n = 0;
+ while(message[n])
+ n++;
+ HEL_CHECK(helLog(message, n));
+ }
+
+ void sys_libc_panic() {
+ // This implementation is inherently signal-safe.
+ const char *message = "mlibc: Panic!";
+ size_t n = 0;
+ while(message[n])
+ n++;
+ helPanic(message, n);
+ }
+}
+
diff --git a/lib/mlibc/sysdeps/managarm/generic/entry.cpp b/lib/mlibc/sysdeps/managarm/generic/entry.cpp
new file mode 100644
index 0000000..3ff28d2
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/entry.cpp
@@ -0,0 +1,132 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <sys/auxv.h>
+
+#include <frg/eternal.hpp>
+
+#include <bits/ensure.h>
+#include <mlibc/allocator.hpp>
+#include <mlibc/debug.hpp>
+#include <mlibc/posix-pipe.hpp>
+#include <mlibc/all-sysdeps.hpp>
+#include <mlibc/elf/startup.h>
+
+#include <protocols/posix/data.hpp>
+#include <protocols/posix/supercalls.hpp>
+
+// defined by the POSIX library
+void __mlibc_initLocale();
+
+extern "C" uintptr_t *__dlapi_entrystack();
+extern "C" void __dlapi_enter(uintptr_t *);
+
+// declared in posix-pipe.hpp
+thread_local Queue globalQueue;
+
+// TODO: clock tracker page and file table don't need to be thread-local!
+thread_local HelHandle __mlibc_posix_lane;
+thread_local void *__mlibc_clk_tracker_page;
+
+namespace {
+ thread_local unsigned __mlibc_gsf_nesting;
+ thread_local void *__mlibc_cached_thread_page;
+ thread_local HelHandle *cachedFileTable;
+
+	// This construction is a bit weird: even though the variables above
+	// are thread_local, we still protect their initialization with a pthread_once_t
+	// (instead of using a C++ constructor).
+	// We do this in order to be able to clear the pthread_once_t after a fork.
+ thread_local pthread_once_t has_cached_infos = PTHREAD_ONCE_INIT;
+
+ void actuallyCacheInfos() {
+ posix::ManagarmProcessData data;
+ HEL_CHECK(helSyscall1(kHelCallSuper + posix::superGetProcessData, reinterpret_cast<HelWord>(&data)));
+
+ __mlibc_posix_lane = data.posixLane;
+ __mlibc_cached_thread_page = data.threadPage;
+ cachedFileTable = data.fileTable;
+ __mlibc_clk_tracker_page = data.clockTrackerPage;
+ }
+}
+
+SignalGuard::SignalGuard() {
+ pthread_once(&has_cached_infos, &actuallyCacheInfos);
+ if(!__mlibc_cached_thread_page)
+ return;
+ auto p = reinterpret_cast<unsigned int *>(__mlibc_cached_thread_page);
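+	// Entering the outermost guard stores 1 into the thread page; the destructor
+	// later checks whether it was changed to 2, which indicates that a signal
+	// arrived while the guard was held and has to be raised on exit.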
+ if(!__mlibc_gsf_nesting)
+ __atomic_store_n(p, 1, __ATOMIC_RELAXED);
+ __mlibc_gsf_nesting++;
+}
+
+SignalGuard::~SignalGuard() {
+ pthread_once(&has_cached_infos, &actuallyCacheInfos);
+ if(!__mlibc_cached_thread_page)
+ return;
+ auto p = reinterpret_cast<unsigned int *>(__mlibc_cached_thread_page);
+ __ensure(__mlibc_gsf_nesting > 0);
+ __mlibc_gsf_nesting--;
+ if(!__mlibc_gsf_nesting) {
+ unsigned int result = __atomic_exchange_n(p, 0, __ATOMIC_RELAXED);
+ if(result == 2) {
+ HEL_CHECK(helSyscall0(kHelCallSuper + posix::superSigRaise));
+ }else{
+ __ensure(result == 1);
+ }
+ }
+}
+
+MemoryAllocator &getSysdepsAllocator() {
+ // use frg::eternal to prevent a call to __cxa_atexit().
+	// this is necessary because __cxa_atexit() calls this function.
+ static frg::eternal<VirtualAllocator> virtualAllocator;
+ static frg::eternal<MemoryPool> heap{virtualAllocator.get()};
+ static frg::eternal<MemoryAllocator> singleton{&heap.get()};
+ return singleton.get();
+}
+
+HelHandle getPosixLane() {
+ cacheFileTable();
+ return __mlibc_posix_lane;
+}
+
+HelHandle *cacheFileTable() {
+ // TODO: Make sure that this is signal-safe (it is called e.g. by sys_clock_get()).
+ pthread_once(&has_cached_infos, &actuallyCacheInfos);
+ return cachedFileTable;
+}
+
+HelHandle getHandleForFd(int fd) {
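+	// fds at or above 512 cannot be translated here (presumably the capacity of
+	// the cached file table).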
+ if (fd >= 512)
+ return 0;
+
+ return cacheFileTable()[fd];
+}
+
+void clearCachedInfos() {
+ has_cached_infos = PTHREAD_ONCE_INIT;
+}
+
+struct LibraryGuard {
+ LibraryGuard();
+};
+
+static LibraryGuard guard;
+
+extern char **environ;
+static mlibc::exec_stack_data __mlibc_stack_data;
+
+LibraryGuard::LibraryGuard() {
+ __mlibc_initLocale();
+
+ // Parse the exec() stack.
+ mlibc::parse_exec_stack(__dlapi_entrystack(), &__mlibc_stack_data);
+ mlibc::set_startup_data(__mlibc_stack_data.argc, __mlibc_stack_data.argv,
+ __mlibc_stack_data.envp);
+}
+
+extern "C" void __mlibc_entry(uintptr_t *entry_stack, int (*main_fn)(int argc, char *argv[], char *env[])) {
+ __dlapi_enter(entry_stack);
+ auto result = main_fn(__mlibc_stack_data.argc, __mlibc_stack_data.argv, environ);
+ exit(result);
+}
diff --git a/lib/mlibc/sysdeps/managarm/generic/file.cpp b/lib/mlibc/sysdeps/managarm/generic/file.cpp
new file mode 100644
index 0000000..1f5cba6
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/file.cpp
@@ -0,0 +1,2526 @@
+#include <asm/ioctls.h>
+#include <dirent.h>
+#include <errno.h>
+#include <stdio.h>
+#include <sys/eventfd.h>
+#include <sys/inotify.h>
+#include <sys/signalfd.h>
+#include <unistd.h>
+
+#include <bits/ensure.h>
+#include <mlibc/all-sysdeps.hpp>
+#include <mlibc/allocator.hpp>
+#include <mlibc/posix-pipe.hpp>
+
+#include <fs.frigg_bragi.hpp>
+#include <posix.frigg_bragi.hpp>
+
+HelHandle __mlibc_getPassthrough(int fd) {
+ auto handle = getHandleForFd(fd);
+ __ensure(handle);
+ return handle;
+}
+
+namespace mlibc {
+
+int sys_chdir(const char *path) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::CHDIR);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_fchdir(int fd) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::FCHDIR);
+ req.set_fd(fd);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+}
+
+int sys_chroot(const char *path) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::CHROOT);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+}
+
+int sys_mkdir(const char *path, mode_t mode) {
+ return sys_mkdirat(AT_FDCWD, path, mode);
+}
+
+int sys_mkdirat(int dirfd, const char *path, mode_t mode) {
+ (void)mode;
+ SignalGuard sguard;
+
+ managarm::posix::MkdirAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(dirfd);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ALREADY_EXISTS) {
+ return EEXIST;
+ } else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::NOT_A_DIRECTORY) {
+ return ENOTDIR;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_symlink(const char *target_path, const char *link_path) {
+ return sys_symlinkat(target_path, AT_FDCWD, link_path);
+}
+
+int sys_symlinkat(const char *target_path, int dirfd, const char *link_path) {
+ SignalGuard sguard;
+
+ managarm::posix::SymlinkAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(dirfd);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), link_path));
+ req.set_target_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), target_path));
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::NOT_A_DIRECTORY) {
+ return ENOTDIR;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_link(const char *old_path, const char *new_path) {
+ return sys_linkat(AT_FDCWD, old_path, AT_FDCWD, new_path, 0);
+}
+
+int sys_linkat(int olddirfd, const char *old_path, int newdirfd, const char *new_path, int flags) {
+ SignalGuard sguard;
+
+ managarm::posix::LinkAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), old_path));
+ req.set_target_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), new_path));
+ req.set_fd(olddirfd);
+ req.set_newfd(newdirfd);
+ req.set_flags(flags);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_rename(const char *path, const char *new_path) {
+ return sys_renameat(AT_FDCWD, path, AT_FDCWD, new_path);
+}
+
+int sys_renameat(int olddirfd, const char *old_path, int newdirfd, const char *new_path) {
+ SignalGuard sguard;
+
+ managarm::posix::RenameAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), old_path));
+ req.set_target_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), new_path));
+ req.set_fd(olddirfd);
+ req.set_newfd(newdirfd);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+} //namespace mlibc
+
+namespace mlibc {
+
+int sys_fcntl(int fd, int request, va_list args, int *result) {
+ SignalGuard sguard;
+ if(request == F_DUPFD) {
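+		// Note: the minimum-fd argument of F_DUPFD is not consulted here; the
+		// request is forwarded as a plain dup with no extra flags.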
+ int newfd;
+ if(int e = sys_dup(fd, 0, &newfd); e)
+ return e;
+ *result = newfd;
+ return 0;
+ }else if(request == F_DUPFD_CLOEXEC) {
+ int newfd;
+ if(int e = sys_dup(fd, O_CLOEXEC, &newfd); e)
+ return e;
+ *result = newfd;
+ return 0;
+ }else if(request == F_GETFD) {
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::FD_GET_FLAGS);
+ req.set_fd(fd);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::NO_SUCH_FD)
+ return EBADF;
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *result = resp.flags();
+ return 0;
+ }else if(request == F_SETFD) {
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::FD_SET_FLAGS);
+ req.set_fd(fd);
+ req.set_flags(va_arg(args, int));
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::NO_SUCH_FD)
+ return EBADF;
+ else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS)
+ return EINVAL;
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *result = static_cast<int>(resp.error());
+ return 0;
+ }else if(request == F_GETFL) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_GET_FILE_FLAGS);
+ req.set_fd(fd);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ mlibc::infoLogger() << "\e[31mmlibc: fcntl(F_GETFL) unimplemented for this file\e[39m" << frg::endlog;
+ return EINVAL;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = resp.flags();
+ return 0;
+ }else if(request == F_SETFL) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_SET_FILE_FLAGS);
+ req.set_fd(fd);
+ req.set_flags(va_arg(args, int));
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ mlibc::infoLogger() << "\e[31mmlibc: fcntl(F_SETFL) unimplemented for this file\e[39m" << frg::endlog;
+ return EINVAL;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = 0;
+ return 0;
+ }else if(request == F_SETLK) {
+ mlibc::infoLogger() << "\e[31mmlibc: F_SETLK\e[39m" << frg::endlog;
+ return 0;
+ }else if(request == F_SETLKW) {
+ mlibc::infoLogger() << "\e[31mmlibc: F_SETLKW\e[39m" << frg::endlog;
+ return 0;
+ }else if(request == F_GETLK) {
+ struct flock *lock = va_arg(args, struct flock *);
+ lock->l_type = F_UNLCK;
+ mlibc::infoLogger() << "\e[31mmlibc: F_GETLK is stubbed!\e[39m" << frg::endlog;
+ return 0;
+ }else if(request == F_ADD_SEALS) {
+ auto seals = va_arg(args, int);
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_ADD_SEALS);
+ req.set_fd(fd);
+ req.set_seals(seals);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ ));
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ mlibc::infoLogger() << "\e[31mmlibc: fcntl(F_ADD_SEALS) unimplemented for this file\e[39m" << frg::endlog;
+ return EINVAL;
+ } else if(resp.error() == managarm::fs::Errors::INSUFFICIENT_PERMISSIONS) {
+ return EPERM;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ *result = resp.seals();
+ return 0;
+ }else if(request == F_GET_SEALS) {
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_GET_SEALS);
+ req.set_fd(fd);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ ));
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ mlibc::infoLogger() << "\e[31mmlibc: fcntl(F_GET_SEALS) unimplemented for this file\e[39m" << frg::endlog;
+ return EINVAL;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = resp.seals();
+ return 0;
+ }else{
+ mlibc::infoLogger() << "\e[31mmlibc: Unexpected fcntl() request: "
+ << request << "\e[39m" << frg::endlog;
+ return EINVAL;
+ }
+}
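+
+// Illustrative usage only (not part of this sysdep): through the libc fcntl()
+// wrapper, F_GETFD/F_SETFD round-trip the close-on-exec flag and exercise the
+// FD_GET_FLAGS/FD_SET_FLAGS requests above:
+//   int fl = fcntl(fd, F_GETFD);
+//   if(fl >= 0)
+//       fcntl(fd, F_SETFD, fl | FD_CLOEXEC);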
+
+int sys_open_dir(const char *path, int *handle) {
+ return sys_open(path, 0, 0, handle);
+}
+
+int sys_read_entries(int fd, void *buffer, size_t max_size, size_t *bytes_read) {
+ SignalGuard sguard;
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_READ_ENTRIES);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::END_OF_FILE) {
+ *bytes_read = 0;
+ return 0;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ __ensure(max_size > sizeof(struct dirent));
+ auto ent = new (buffer) struct dirent;
+ memset(ent, 0, sizeof(struct dirent));
+ memcpy(ent->d_name, resp.path().data(), resp.path().size());
+ ent->d_reclen = sizeof(struct dirent);
+ *bytes_read = sizeof(struct dirent);
+ return 0;
+ }
+}
+
+int sys_ttyname(int fd, char *buf, size_t size) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::TTY_NAME);
+ req.set_fd(fd);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::NOT_A_TTY) {
+ return ENOTTY;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ __ensure(size >= resp.path().size() + 1);
+ memcpy(buf, resp.path().data(), resp.path().size());
+ buf[resp.path().size()] = '\0';
+ return 0;
+ }
+}
+
+int sys_fdatasync(int) {
+ mlibc::infoLogger() << "\e[35mmlibc: fdatasync() is a no-op\e[39m"
+ << frg::endlog;
+ return 0;
+}
+
+int sys_getcwd(char *buffer, size_t size) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::GETCWD);
+ req.set_size(size);
+
+ auto [offer, send_req, recv_resp, recv_path] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(buffer, size))
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+ HEL_CHECK(recv_path.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ if(static_cast<size_t>(resp.size()) >= size)
+ return ERANGE;
+ return 0;
+}
+
+int sys_vm_map(void *hint, size_t size, int prot, int flags, int fd, off_t offset, void **window) {
+ SignalGuard sguard;
+
+ managarm::posix::VmMapRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_address_hint(reinterpret_cast<uintptr_t>(hint));
+ req.set_size(size);
+ req.set_mode(prot);
+ req.set_flags(flags);
+ req.set_fd(fd);
+ req.set_rel_offset(offset);
+
+ auto [offer, sendReq, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ if(resp.error() == managarm::posix::Errors::ALREADY_EXISTS) {
+ return EEXIST;
+ }else if(resp.error() == managarm::posix::Errors::NO_MEMORY) {
+ return EFAULT;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *window = reinterpret_cast<void *>(resp.offset());
+ }
+
+ return 0;
+}
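+
+// Illustrative only: mmap() lowers into sys_vm_map(); e.g. an anonymous,
+// private mapping such as
+//   void *p = mmap(nullptr, 0x1000, PROT_READ | PROT_WRITE,
+//                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+// arrives here with hint = nullptr, prot and flags forwarded verbatim, and fd = -1.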
+
+int sys_vm_remap(void *pointer, size_t size, size_t new_size, void **window) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::VM_REMAP);
+ req.set_address(reinterpret_cast<uintptr_t>(pointer));
+ req.set_size(size);
+ req.set_new_size(new_size);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *window = reinterpret_cast<void *>(resp.offset());
+ return 0;
+}
+
+int sys_vm_protect(void *pointer, size_t size, int prot) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::VM_PROTECT);
+ req.set_address(reinterpret_cast<uintptr_t>(pointer));
+ req.set_size(size);
+ req.set_mode(prot);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+}
+
+int sys_vm_unmap(void *pointer, size_t size) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::VM_UNMAP);
+ req.set_address(reinterpret_cast<uintptr_t>(pointer));
+ req.set_size(size);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+}
+
+int sys_setsid(pid_t *sid) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::SETSID);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ACCESS_DENIED) {
+ *sid = -1;
+ return EPERM;
+ }
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *sid = resp.sid();
+ return 0;
+}
+
+int sys_tcgetattr(int fd, struct termios *attr) {
+ int result;
+ if(int e = sys_ioctl(fd, TCGETS, attr, &result); e)
+ return e;
+ return 0;
+}
+
+int sys_tcsetattr(int fd, int when, const struct termios *attr) {
+ if(when < TCSANOW || when > TCSAFLUSH)
+ return EINVAL;
+
+ if(int e = sys_ioctl(fd, TCSETS, const_cast<struct termios *>(attr), nullptr); e)
+ return e;
+ return 0;
+}
+
+int sys_tcdrain(int) {
+ mlibc::infoLogger() << "\e[35mmlibc: tcdrain() is a stub\e[39m" << frg::endlog;
+ return 0;
+}
+
+int sys_socket(int domain, int type_and_flags, int proto, int *fd) {
+ constexpr int type_mask = int(0xF);
+ constexpr int flags_mask = ~int(0xF);
+ __ensure(!((type_and_flags & flags_mask) & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)));
+
+ SignalGuard sguard;
+
+ managarm::posix::SocketRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_domain(domain);
+ req.set_socktype(type_and_flags & type_mask);
+ req.set_protocol(proto);
+ req.set_flags(type_and_flags & flags_mask);
+
+ auto [offer, sendReq, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EAFNOSUPPORT;
+ } else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *fd = resp.fd();
+ return 0;
+ }
+}
+
+int sys_pipe(int *fds, int flags) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::PIPE_CREATE);
+ req.set_flags(flags);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ __ensure(resp.fds_size() == 2);
+ fds[0] = resp.fds(0);
+ fds[1] = resp.fds(1);
+ return 0;
+}
+
+int sys_socketpair(int domain, int type_and_flags, int proto, int *fds) {
+ constexpr int type_mask = int(0xF);
+ constexpr int flags_mask = ~int(0xF);
+ __ensure(!((type_and_flags & flags_mask) & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)));
+
+ SignalGuard sguard;
+
+ managarm::posix::SockpairRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_domain(domain);
+ req.set_socktype(type_and_flags & type_mask);
+ req.set_protocol(proto);
+ req.set_flags(type_and_flags & flags_mask);
+
+ auto [offer, sendReq, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ if(resp.error() == managarm::posix::Errors::PROTOCOL_NOT_SUPPORTED) {
+ return EPROTONOSUPPORT;
+ } else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ }
+ __ensure(resp.fds_size() == 2);
+ fds[0] = resp.fds(0);
+ fds[1] = resp.fds(1);
+ return 0;
+}
+
+int sys_msg_send(int sockfd, const struct msghdr *hdr, int flags, ssize_t *length) {
+ frg::vector<HelSgItem, MemoryAllocator> sglist{getSysdepsAllocator()};
+ auto handle = getHandleForFd(sockfd);
+ if (!handle)
+ return EBADF;
+
+ size_t overall_size = 0;
+ for(int i = 0; i < hdr->msg_iovlen; i++) {
+ HelSgItem item{
+ .buffer = hdr->msg_iov[i].iov_base,
+ .length = hdr->msg_iov[i].iov_len,
+ };
+ sglist.push_back(item);
+ overall_size += hdr->msg_iov[i].iov_len;
+ }
+
+ SignalGuard sguard;
+
+ managarm::fs::SendMsgRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_flags(flags);
+ req.set_size(overall_size);
+
+ for(auto cmsg = CMSG_FIRSTHDR(hdr); cmsg; cmsg = CMSG_NXTHDR(hdr, cmsg)) {
+ __ensure(cmsg->cmsg_level == SOL_SOCKET);
+ if(cmsg->cmsg_type == SCM_CREDENTIALS) {
+ mlibc::infoLogger() << "mlibc: SCM_CREDENTIALS requested but we don't handle that yet!" << frg::endlog;
+ return EINVAL;
+ }
+ __ensure(cmsg->cmsg_type == SCM_RIGHTS);
+ __ensure(cmsg->cmsg_len >= sizeof(struct cmsghdr));
+
+ size_t size = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
+ __ensure(!(size % sizeof(int)));
+ for(size_t off = 0; off < size; off += sizeof(int)) {
+ int fd;
+ memcpy(&fd, CMSG_DATA(cmsg) + off, sizeof(int));
+ req.add_fds(fd);
+ }
+ }
+
+ auto [offer, send_head, send_tail, send_data, imbue_creds, send_addr, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::sendBufferSg(sglist.data(), hdr->msg_iovlen),
+ helix_ng::imbueCredentials(),
+ helix_ng::sendBuffer(hdr->msg_name, hdr->msg_namelen),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(send_data.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(send_addr.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SendMsgReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() == managarm::fs::Errors::BROKEN_PIPE) {
+ return EPIPE;
+ }else if(resp.error() == managarm::fs::Errors::NOT_CONNECTED) {
+ return ENOTCONN;
+ }else if(resp.error() == managarm::fs::Errors::WOULD_BLOCK) {
+ return EAGAIN;
+ }else if(resp.error() == managarm::fs::Errors::HOST_UNREACHABLE) {
+ return EHOSTUNREACH;
+ }else if(resp.error() == managarm::fs::Errors::ACCESS_DENIED) {
+ return EACCES;
+ }else if(resp.error() == managarm::fs::Errors::NETWORK_UNREACHABLE) {
+ return ENETUNREACH;
+ }else if(resp.error() == managarm::fs::Errors::DESTINATION_ADDRESS_REQUIRED) {
+ return EDESTADDRREQ;
+ }else if(resp.error() == managarm::fs::Errors::ADDRESS_NOT_AVAILABLE) {
+ return EADDRNOTAVAIL;
+ }else if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else if(resp.error() == managarm::fs::Errors::AF_NOT_SUPPORTED) {
+ return EAFNOSUPPORT;
+ }else if(resp.error() == managarm::fs::Errors::MESSAGE_TOO_LARGE) {
+ return EMSGSIZE;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *length = resp.size();
+ return 0;
+ }
+}
+
+int sys_msg_recv(int sockfd, struct msghdr *hdr, int flags, ssize_t *length) {
+ if(!hdr->msg_iovlen) {
+ return EMSGSIZE;
+ }
+
+ auto handle = getHandleForFd(sockfd);
+ if (!handle)
+ return EBADF;
+
+ SignalGuard sguard;
+
+ managarm::fs::RecvMsgRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_flags(flags);
+ req.set_size(hdr->msg_iov[0].iov_len);
+ req.set_addr_size(hdr->msg_namelen);
+ req.set_ctrl_size(hdr->msg_controllen);
+
+ auto [offer, send_req, imbue_creds, recv_resp, recv_addr, recv_data, recv_ctrl] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::imbueCredentials(),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(hdr->msg_name, hdr->msg_namelen),
+ helix_ng::recvBuffer(hdr->msg_iov[0].iov_base, hdr->msg_iov[0].iov_len),
+ helix_ng::recvBuffer(hdr->msg_control, hdr->msg_controllen))
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::RecvMsgReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() == managarm::fs::Errors::WOULD_BLOCK) {
+ return EAGAIN;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ HEL_CHECK(recv_addr.error());
+ HEL_CHECK(recv_data.error());
+ HEL_CHECK(recv_ctrl.error());
+
+ hdr->msg_namelen = resp.addr_size();
+ hdr->msg_controllen = recv_ctrl.actualLength();
+ hdr->msg_flags = resp.flags();
+ *length = resp.ret_val();
+ return 0;
+ }
+}
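+
+// Note: sys_msg_recv() above only fills hdr->msg_iov[0]; additional iovec
+// entries in the msghdr are currently ignored by this sysdep.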
+
+int sys_pselect(int, fd_set *read_set, fd_set *write_set,
+ fd_set *except_set, const struct timespec *timeout,
+ const sigset_t *sigmask, int *num_events) {
+ // TODO: Do not keep errors from epoll (?).
+ int fd = epoll_create1(0);
+ if(fd == -1)
+ return -1;
+
+ for(int k = 0; k < FD_SETSIZE; k++) {
+ struct epoll_event ev;
+ memset(&ev, 0, sizeof(struct epoll_event));
+
+ if(read_set && FD_ISSET(k, read_set))
+ ev.events |= EPOLLIN; // TODO: Additional events.
+ if(write_set && FD_ISSET(k, write_set))
+ ev.events |= EPOLLOUT; // TODO: Additional events.
+ if(except_set && FD_ISSET(k, except_set))
+ ev.events |= EPOLLPRI;
+
+ if(!ev.events)
+ continue;
+ ev.data.u32 = k;
+
+ if(epoll_ctl(fd, EPOLL_CTL_ADD, k, &ev))
+ return -1;
+ }
+
+ struct epoll_event evnts[16];
+ int n = epoll_pwait(fd, evnts, 16,
+ timeout ? (timeout->tv_sec * 1000 + timeout->tv_nsec / 1000000) : -1, sigmask);
+ if(n == -1)
+ return -1;
+
+ fd_set res_read_set;
+ fd_set res_write_set;
+ fd_set res_except_set;
+ FD_ZERO(&res_read_set);
+ FD_ZERO(&res_write_set);
+ FD_ZERO(&res_except_set);
+ int m = 0;
+
+ for(int i = 0; i < n; i++) {
+ int k = evnts[i].data.u32;
+
+ if(read_set && FD_ISSET(k, read_set)
+ && evnts[i].events & (EPOLLIN | EPOLLERR | EPOLLHUP)) {
+ FD_SET(k, &res_read_set);
+ m++;
+ }
+
+ if(write_set && FD_ISSET(k, write_set)
+ && evnts[i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP)) {
+ FD_SET(k, &res_write_set);
+ m++;
+ }
+
+ if(except_set && FD_ISSET(k, except_set)
+ && evnts[i].events & EPOLLPRI) {
+ FD_SET(k, &res_except_set);
+ m++;
+ }
+ }
+
+ if(close(fd))
+ __ensure("close() failed on epoll file");
+
+ if(read_set)
+ memcpy(read_set, &res_read_set, sizeof(fd_set));
+ if(write_set)
+ memcpy(write_set, &res_write_set, sizeof(fd_set));
+ if(except_set)
+ memcpy(except_set, &res_except_set, sizeof(fd_set));
+
+ *num_events = m;
+ return 0;
+}
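+
+// Note: sys_pselect() is emulated on top of a temporary epoll instance: each
+// readable/writable/exceptional fd is registered as EPOLLIN/EPOLLOUT/EPOLLPRI
+// and the reported events are folded back into the fd_sets. A caller-side
+// sketch (illustrative only):
+//   fd_set rd;
+//   FD_ZERO(&rd);
+//   FD_SET(sock, &rd);
+//   struct timespec ts{1, 0};
+//   int n = pselect(sock + 1, &rd, nullptr, nullptr, &ts, nullptr);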
+
+int sys_poll(struct pollfd *fds, nfds_t count, int timeout, int *num_events) {
+ __ensure(timeout >= 0 || timeout == -1); // TODO: Report errors correctly.
+
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::EPOLL_CALL);
+ req.set_timeout(timeout > 0 ? int64_t{timeout} * 1000000 : timeout);
+
+ for(nfds_t i = 0; i < count; i++) {
+ req.add_fds(fds[i].fd);
+ req.add_events(fds[i].events);
+ }
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ } else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ __ensure(resp.events_size() == count);
+
+ int m = 0;
+ for(nfds_t i = 0; i < count; i++) {
+ if(resp.events(i))
+ m++;
+ fds[i].revents = resp.events(i);
+ }
+
+ *num_events = m;
+ return 0;
+ }
+}
+
+int sys_epoll_create(int flags, int *fd) {
+ // Some applications assume EPOLL_CLOEXEC and O_CLOEXEC to be the same.
+ // They are on Linux, but not yet on managarm.
+ __ensure(!(flags & ~(EPOLL_CLOEXEC | O_CLOEXEC)));
+
+ SignalGuard sguard;
+
+ uint32_t proto_flags = 0;
+ if(flags & EPOLL_CLOEXEC || flags & O_CLOEXEC)
+ proto_flags |= managarm::posix::OpenFlags::OF_CLOEXEC;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::EPOLL_CREATE);
+ req.set_flags(proto_flags);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *fd = resp.fd();
+ return 0;
+}
+
+int sys_epoll_ctl(int epfd, int mode, int fd, struct epoll_event *ev) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ if(mode == EPOLL_CTL_ADD) {
+ __ensure(ev);
+ req.set_request_type(managarm::posix::CntReqType::EPOLL_ADD);
+ req.set_flags(ev->events);
+ req.set_cookie(ev->data.u64);
+ }else if(mode == EPOLL_CTL_MOD) {
+ __ensure(ev);
+ req.set_request_type(managarm::posix::CntReqType::EPOLL_MODIFY);
+ req.set_flags(ev->events);
+ req.set_cookie(ev->data.u64);
+ }else if(mode == EPOLL_CTL_DEL) {
+ req.set_request_type(managarm::posix::CntReqType::EPOLL_DELETE);
+ }else{
+ mlibc::panicLogger() << "\e[31mmlibc: Illegal epoll_ctl() mode\e[39m" << frg::endlog;
+ }
+ req.set_fd(epfd);
+ req.set_newfd(fd);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ } else if(resp.error() == managarm::posix::Errors::ALREADY_EXISTS) {
+ return EEXIST;
+ } else if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ } else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_epoll_pwait(int epfd, struct epoll_event *ev, int n,
+ int timeout, const sigset_t *sigmask, int *raised) {
+ __ensure(timeout >= 0 || timeout == -1); // TODO: Report errors correctly.
+
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::EPOLL_WAIT);
+ req.set_fd(epfd);
+ req.set_size(n);
+ req.set_timeout(timeout > 0 ? int64_t{timeout} * 1000000 : timeout);
+ if(sigmask != NULL) {
+ req.set_sigmask((long int)*sigmask);
+ req.set_sigmask_needed(true);
+ } else {
+ req.set_sigmask_needed(false);
+ }
+
+ auto [offer, send_req, recv_resp, recv_data] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(ev, n * sizeof(struct epoll_event)))
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+ HEL_CHECK(recv_data.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ }
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ __ensure(!(recv_data.actualLength() % sizeof(struct epoll_event)));
+ *raised = recv_data.actualLength() / sizeof(struct epoll_event);
+ return 0;
+}
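+
+// Note: epoll timeouts are passed to this sysdep in milliseconds and are
+// converted to nanoseconds (timeout * 1000000) before being sent to the POSIX
+// server; 0 and -1 are forwarded unchanged.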
+
+int sys_timerfd_create(int clockid, int flags, int *fd) {
+ (void) clockid;
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::TIMERFD_CREATE);
+ req.set_flags(flags);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *fd = resp.fd();
+ return 0;
+}
+
+int sys_timerfd_settime(int fd, int,
+ const struct itimerspec *value, struct itimerspec *oldvalue) {
+ __ensure(!oldvalue);
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::TIMERFD_SETTIME);
+ req.set_fd(fd);
+ req.set_time_secs(value->it_value.tv_sec);
+ req.set_time_nanos(value->it_value.tv_nsec);
+ req.set_interval_secs(value->it_interval.tv_sec);
+ req.set_interval_nanos(value->it_interval.tv_nsec);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+}
+
+int sys_signalfd_create(const sigset_t *masks, int flags, int *fd) {
+ __ensure(!(flags & ~(SFD_CLOEXEC | SFD_NONBLOCK)));
+
+ uint32_t proto_flags = 0;
+ if(flags & SFD_CLOEXEC)
+ proto_flags |= managarm::posix::OpenFlags::OF_CLOEXEC;
+ if(flags & SFD_NONBLOCK)
+ proto_flags |= managarm::posix::OpenFlags::OF_NONBLOCK;
+
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::SIGNALFD_CREATE);
+ req.set_flags(proto_flags);
+ req.set_sigset(*masks);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *fd = resp.fd();
+ return 0;
+}
+
+int sys_inotify_create(int flags, int *fd) {
+ __ensure(!(flags & ~(IN_CLOEXEC | IN_NONBLOCK)));
+
+ SignalGuard sguard;
+
+ uint32_t proto_flags = 0;
+ if(flags & IN_CLOEXEC)
+ proto_flags |= managarm::posix::OpenFlags::OF_CLOEXEC;
+ if(flags & IN_NONBLOCK)
+ proto_flags |= managarm::posix::OpenFlags::OF_NONBLOCK;
+
+ managarm::posix::InotifyCreateRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_flags(proto_flags);
+
+ auto [offer, sendReq, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *fd = resp.fd();
+ return 0;
+}
+
+int sys_inotify_add_watch(int ifd, const char *path, uint32_t mask, int *wd) {
+ SignalGuard sguard;
+
+ managarm::posix::InotifyAddRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(ifd);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+ req.set_flags(mask);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *wd = resp.wd();
+ return 0;
+ }
+}
+
+int sys_eventfd_create(unsigned int initval, int flags, int *fd) {
+ SignalGuard sguard;
+
+ uint32_t proto_flags = 0;
+ if (flags & EFD_NONBLOCK) proto_flags |= managarm::posix::OpenFlags::OF_NONBLOCK;
+ if (flags & EFD_CLOEXEC) proto_flags |= managarm::posix::OpenFlags::OF_CLOEXEC;
+ if (flags & EFD_SEMAPHORE)
+ return ENOSYS;
+
+ managarm::posix::EventfdCreateRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_flags(proto_flags);
+ req.set_initval(initval);
+
+ auto [offer, sendReq, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *fd = resp.fd();
+ return 0;
+}
+
+int sys_open(const char *path, int flags, mode_t mode, int *fd) {
+ return sys_openat(AT_FDCWD, path, flags, mode, fd);
+}
+
+int sys_openat(int dirfd, const char *path, int flags, mode_t mode, int *fd) {
+ SignalGuard sguard;
+
+ // We do not support O_TMPFILE.
+ if(flags & O_TMPFILE)
+ return EOPNOTSUPP;
+
+ uint32_t proto_flags = 0;
+ if(flags & O_APPEND)
+ proto_flags |= managarm::posix::OpenFlags::OF_APPEND;
+ if(flags & O_CREAT)
+ proto_flags |= managarm::posix::OpenFlags::OF_CREATE;
+ if(flags & O_EXCL)
+ proto_flags |= managarm::posix::OpenFlags::OF_EXCLUSIVE;
+ if(flags & O_NONBLOCK)
+ proto_flags |= managarm::posix::OpenFlags::OF_NONBLOCK;
+ if(flags & O_TRUNC)
+ proto_flags |= managarm::posix::OpenFlags::OF_TRUNC;
+
+ if(flags & O_CLOEXEC)
+ proto_flags |= managarm::posix::OpenFlags::OF_CLOEXEC;
+ if(flags & O_NOCTTY)
+ proto_flags |= managarm::posix::OpenFlags::OF_NOCTTY;
+
+ if(flags & O_RDONLY)
+ proto_flags |= managarm::posix::OpenFlags::OF_RDONLY;
+ else if(flags & O_WRONLY)
+ proto_flags |= managarm::posix::OpenFlags::OF_WRONLY;
+ else if(flags & O_RDWR)
+ proto_flags |= managarm::posix::OpenFlags::OF_RDWR;
+ else if(flags & O_PATH)
+ proto_flags |= managarm::posix::OpenFlags::OF_PATH;
+
+ managarm::posix::OpenAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(dirfd);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+ req.set_flags(proto_flags);
+ req.set_mode(mode);
+
+ auto [offer, sendHead, sendTail, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendHead.error());
+ HEL_CHECK(sendTail.error());
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::ALREADY_EXISTS) {
+ return EEXIST;
+ }else if(resp.error() == managarm::posix::Errors::NOT_A_DIRECTORY) {
+ return ENOTDIR;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_OPERATION_TARGET) {
+ mlibc::infoLogger() << "\e[31mmlibc: openat unimplemented for this file " << path << "\e[39m" << frg::endlog;
+ return EINVAL;
+ }else if(resp.error() == managarm::posix::Errors::NO_BACKING_DEVICE) {
+ return ENXIO;
+ }else if(resp.error() == managarm::posix::Errors::IS_DIRECTORY) {
+ return EISDIR;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *fd = resp.fd();
+ return 0;
+ }
+}
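+
+// Illustrative only: open() and openat() both funnel into sys_openat(), with
+// the POSIX O_* flags translated into managarm OpenFlags as above. For example
+// (the path is only an example),
+//   int fd = openat(AT_FDCWD, "/tmp/example", O_CREAT | O_WRONLY | O_CLOEXEC, 0644);
+// maps to OF_CREATE | OF_WRONLY | OF_CLOEXEC.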
+
+int sys_mkfifoat(int dirfd, const char *path, mode_t mode) {
+ SignalGuard sguard;
+
+ managarm::posix::MkfifoAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(dirfd);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+ req.set_mode(mode);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::ALREADY_EXISTS) {
+ return EEXIST;
+ }else if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else if(resp.error() == managarm::posix::Errors::INTERNAL_ERROR) {
+ return EIEIO;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_mknodat(int dirfd, const char *path, int mode, int dev) {
+ SignalGuard sguard;
+
+ managarm::posix::MknodAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_dirfd(dirfd);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+ req.set_mode(mode);
+ req.set_device(dev);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::ALREADY_EXISTS) {
+ return EEXIST;
+ }else if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_read(int fd, void *data, size_t max_size, ssize_t *bytes_read) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::READ);
+ req.set_fd(fd);
+ req.set_size(max_size);
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, send_req, imbue_creds, recv_resp, recv_data] =
+ exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::imbueCredentials(),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(data, max_size)
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+/* if(resp.error() == managarm::fs::Errors::NO_SUCH_FD) {
+ return EBADF;
+ }else*/
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else if(resp.error() == managarm::fs::Errors::WOULD_BLOCK) {
+ return EAGAIN;
+ }else if(resp.error() == managarm::fs::Errors::END_OF_FILE) {
+ *bytes_read = 0;
+ return 0;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ HEL_CHECK(recv_data.error());
+ *bytes_read = recv_data.actualLength();
+ return 0;
+ }
+}
+
+int sys_readv(int fd, const struct iovec *iovs, int iovc, ssize_t *bytes_read) {
+ *bytes_read = 0;
+
+ for(int i = 0; i < iovc; i++) {
+ ssize_t intermed = 0;
+
+ if(int e = sys_read(fd, iovs[i].iov_base, iovs[i].iov_len, &intermed); e)
+ return e;
+ else if(intermed == 0)
+ break;
+
+ *bytes_read += intermed;
+ }
+
+ return 0;
+}
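+
+// Note: sys_readv() above issues one READ request per iovec rather than a
+// single scatter-gather request, so it is not atomic with respect to other
+// readers of the same file. Caller-side usage (illustrative; hdr and body are
+// hypothetical buffers):
+//   struct iovec iov[2] = {{&hdr, sizeof(hdr)}, {body, body_len}};
+//   ssize_t n = readv(fd, iov, 2);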
+
+int sys_write(int fd, const void *data, size_t size, ssize_t *bytes_written) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::WRITE);
+ req.set_fd(fd);
+ req.set_size(size);
+
+ auto [offer, send_req, imbue_creds, send_data, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::imbueCredentials(),
+ helix_ng::sendBuffer(data, size),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(send_data.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ // TODO: implement NO_SUCH_FD
+/* if(resp.error() == managarm::fs::Errors::NO_SUCH_FD) {
+ return EBADF;
+ }else*/ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ return EINVAL; // FD does not support writes.
+ }else if(resp.error() == managarm::fs::Errors::NO_SPACE_LEFT) {
+ return ENOSPC;
+ }else if(resp.error() == managarm::fs::Errors::WOULD_BLOCK) {
+ return EAGAIN;
+ }else if(resp.error() == managarm::fs::Errors::NOT_CONNECTED) {
+ return ENOTCONN;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ if(bytes_written) {
+ *bytes_written = resp.size();
+ }
+ return 0;
+ }
+}
+
+int sys_pread(int fd, void *buf, size_t n, off_t off, ssize_t *bytes_read) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_PREAD);
+ req.set_fd(fd);
+ req.set_size(n);
+ req.set_offset(off);
+
+ auto [offer, send_req, imbue_creds, recv_resp, recv_data] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::imbueCredentials(),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(buf, n))
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+/* if(resp.error() == managarm::fs::Errors::NO_SUCH_FD) {
+ return EBADF;
+ }else*/
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else if(resp.error() == managarm::fs::Errors::WOULD_BLOCK) {
+ return EAGAIN;
+ }else if(resp.error() == managarm::fs::Errors::END_OF_FILE) {
+ *bytes_read = 0;
+ return 0;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ HEL_CHECK(recv_data.error());
+ *bytes_read = recv_data.actualLength();
+ return 0;
+ }
+}
+
+int sys_pwrite(int fd, const void *buf, size_t n, off_t off, ssize_t *bytes_written) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_PWRITE);
+ req.set_fd(fd);
+ req.set_size(n);
+ req.set_offset(off);
+
+ auto [offer, send_head, imbue_creds, to_write, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::imbueCredentials(),
+ helix_ng::sendBuffer(buf, n),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(to_write.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else if(resp.error() == managarm::fs::Errors::WOULD_BLOCK) {
+ return EAGAIN;
+ }else if(resp.error() == managarm::fs::Errors::NO_SPACE_LEFT) {
+ return ENOSPC;
+ }else if(resp.error() == managarm::fs::Errors::SEEK_ON_PIPE) {
+ return ESPIPE;
+ }else if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *bytes_written = n;
+ return 0;
+ }
+}
+
+int sys_seek(int fd, off_t offset, int whence, off_t *new_offset) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if(!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(fd);
+ req.set_rel_offset(offset);
+
+ if(whence == SEEK_SET) {
+ req.set_req_type(managarm::fs::CntReqType::SEEK_ABS);
+ }else if(whence == SEEK_CUR) {
+ req.set_req_type(managarm::fs::CntReqType::SEEK_REL);
+ }else if(whence == SEEK_END) {
+ req.set_req_type(managarm::fs::CntReqType::SEEK_EOF);
+ }else{
+ return EINVAL;
+ }
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::SEEK_ON_PIPE) {
+ return ESPIPE;
+ } else if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ } else {
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *new_offset = resp.offset();
+ return 0;
+ }
+}
+
+
+int sys_close(int fd) {
+ SignalGuard sguard;
+
+ managarm::posix::CloseRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(fd);
+
+ auto [offer, sendReq, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+
+ if(resp.error() == managarm::posix::Errors::NO_SUCH_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::SUCCESS) {
+ return 0;
+ }else{
+ __ensure(!"Unexpected error");
+ __builtin_unreachable();
+ }
+}
+
+int sys_dup(int fd, int flags, int *newfd) {
+ SignalGuard sguard;
+
+ __ensure(!(flags & ~(O_CLOEXEC)));
+
+ uint32_t proto_flags = 0;
+ if(flags & O_CLOEXEC)
+ proto_flags |= managarm::posix::OpenFlags::OF_CLOEXEC;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::DUP);
+ req.set_fd(fd);
+ req.set_flags(proto_flags);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if (resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ } else if (resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ } else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ }
+
+ *newfd = resp.fd();
+ return 0;
+}
+
+int sys_dup2(int fd, int flags, int newfd) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::DUP2);
+ req.set_fd(fd);
+ req.set_newfd(newfd);
+ req.set_flags(flags);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if (resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ } else if (resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ } else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ }
+
+ return 0;
+}
+
+int sys_stat(fsfd_target fsfdt, int fd, const char *path, int flags, struct stat *result) {
+ SignalGuard sguard;
+
+ managarm::posix::FstatAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ if (fsfdt == fsfd_target::path) {
+ req.set_fd(AT_FDCWD);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+ } else if (fsfdt == fsfd_target::fd) {
+ flags |= AT_EMPTY_PATH;
+ req.set_fd(fd);
+ } else {
+ __ensure(fsfdt == fsfd_target::fd_path);
+ req.set_fd(fd);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+ }
+
+ if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) {
+ return EINVAL;
+ }
+
+ if (!(flags & AT_EMPTY_PATH) && (!path || !strlen(path))) {
+ return ENOENT;
+ }
+
+ req.set_flags(flags);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::BAD_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::NOT_A_DIRECTORY) {
+ return ENOTDIR;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ memset(result, 0, sizeof(struct stat));
+
+ switch(resp.file_type()) {
+ case managarm::posix::FileType::FT_REGULAR:
+ result->st_mode = S_IFREG; break;
+ case managarm::posix::FileType::FT_DIRECTORY:
+ result->st_mode = S_IFDIR; break;
+ case managarm::posix::FileType::FT_SYMLINK:
+ result->st_mode = S_IFLNK; break;
+ case managarm::posix::FileType::FT_CHAR_DEVICE:
+ result->st_mode = S_IFCHR; break;
+ case managarm::posix::FileType::FT_BLOCK_DEVICE:
+ result->st_mode = S_IFBLK; break;
+ case managarm::posix::FileType::FT_SOCKET:
+ result->st_mode = S_IFSOCK; break;
+ case managarm::posix::FileType::FT_FIFO:
+ result->st_mode = S_IFIFO; break;
+ default:
+ __ensure(!resp.file_type());
+ }
+
+ result->st_dev = 1;
+ result->st_ino = resp.fs_inode();
+ result->st_mode |= resp.mode();
+ result->st_nlink = resp.num_links();
+ result->st_uid = resp.uid();
+ result->st_gid = resp.gid();
+ result->st_rdev = resp.ref_devnum();
+ result->st_size = resp.file_size();
+ result->st_atim.tv_sec = resp.atime_secs();
+ result->st_atim.tv_nsec = resp.atime_nanos();
+ result->st_mtim.tv_sec = resp.mtime_secs();
+ result->st_mtim.tv_nsec = resp.mtime_nanos();
+ result->st_ctim.tv_sec = resp.ctime_secs();
+ result->st_ctim.tv_nsec = resp.ctime_nanos();
+ result->st_blksize = 4096;
+ result->st_blocks = resp.file_size() / 512 + 1;
+ return 0;
+ }
+}
+
+int sys_readlink(const char *path, void *data, size_t max_size, ssize_t *length) {
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::READLINK);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+
+ auto [offer, send_req, recv_resp, recv_data] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(data, max_size))
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *length = recv_data.actualLength();
+ return 0;
+ }
+}
+
+int sys_rmdir(const char *path) {
+ SignalGuard sguard;
+
+ managarm::posix::RmdirRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_ftruncate(int fd, size_t size) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_TRUNCATE);
+ req.set_size(size);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ return EINVAL;
+ } else {
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_fallocate(int fd, off_t offset, size_t size) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_FALLOCATE);
+ req.set_rel_offset(offset);
+ req.set_size(size);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ return EINVAL;
+ }else if(resp.error() == managarm::fs::Errors::INSUFFICIENT_PERMISSIONS) {
+ return EPERM;
+ }else if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_unlinkat(int fd, const char *path, int flags) {
+ SignalGuard sguard;
+
+ managarm::posix::UnlinkAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(fd);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), path));
+ req.set_flags(flags);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::RESOURCE_IN_USE) {
+ return EBUSY;
+ }else if(resp.error() == managarm::posix::Errors::IS_DIRECTORY) {
+ return EISDIR;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_access(const char *path, int mode) {
+ return sys_faccessat(AT_FDCWD, path, mode, 0);
+}
+
+int sys_faccessat(int dirfd, const char *pathname, int, int flags) {
+ SignalGuard sguard;
+
+ managarm::posix::AccessAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), pathname));
+ req.set_fd(dirfd);
+ req.set_flags(flags);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::NO_SUCH_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
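+// flock() is forwarded on the file's own lane; a WOULD_BLOCK reply is translated to EWOULDBLOCK.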
+int sys_flock(int fd, int opts) {
+ SignalGuard sguard;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::FLOCK);
+ req.set_fd(fd);
+ req.set_flock_flags(opts);
+ auto handle = getHandleForFd(fd);
+ if(!handle) {
+ return EBADF;
+ }
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::WOULD_BLOCK) {
+ return EWOULDBLOCK;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ } else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
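+// isatty() asks the POSIX server about the fd; a non-zero resp.mode() marks a terminal, anything else yields ENOTTY.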
+int sys_isatty(int fd) {
+ SignalGuard sguard;
+
+ managarm::posix::IsTtyRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(fd);
+
+ auto [offer, sendReq, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ if(resp.error() == managarm::posix::Errors::NO_SUCH_FD) {
+ return EBADF;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ if(resp.mode())
+ return 0;
+ return ENOTTY;
+ }
+}
+
+int sys_chmod(const char *pathname, mode_t mode) {
+ return sys_fchmodat(AT_FDCWD, pathname, mode, 0);
+}
+
+int sys_fchmod(int fd, mode_t mode) {
+ return sys_fchmodat(fd, "", mode, AT_EMPTY_PATH);
+}
+
+int sys_fchmodat(int fd, const char *pathname, mode_t mode, int flags) {
+ SignalGuard sguard;
+
+ managarm::posix::FchmodAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(fd);
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), pathname));
+ req.set_mode(mode);
+ req.set_flags(flags);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::NO_SUCH_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else if(resp.error() == managarm::posix::Errors::NOT_SUPPORTED) {
+ return ENOTSUP;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_fchownat(int dirfd, const char *pathname, uid_t owner, gid_t group, int flags) {
+ (void)dirfd;
+ (void)pathname;
+ (void)owner;
+ (void)group;
+ (void)flags;
+ mlibc::infoLogger() << "mlibc: sys_fchownat is a stub!" << frg::endlog;
+ return 0;
+}
+
+int sys_umask(mode_t mode, mode_t *old) {
+ (void)mode;
+ mlibc::infoLogger() << "mlibc: sys_umask is a stub, hardcoding 022!" << frg::endlog;
+ *old = 022;
+ return 0;
+}
+
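+// A null times pointer means "set both timestamps to the current time"; this is encoded by sending UTIME_NOW in all four fields.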
+int sys_utimensat(int dirfd, const char *pathname, const struct timespec times[2], int flags) {
+ SignalGuard sguard;
+
+ managarm::posix::UtimensAtRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(dirfd);
+ if(pathname != nullptr)
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), pathname));
+ if(times) {
+ req.set_atimeSec(times[0].tv_sec);
+ req.set_atimeNsec(times[0].tv_nsec);
+ req.set_mtimeSec(times[1].tv_sec);
+ req.set_mtimeNsec(times[1].tv_nsec);
+ } else {
+ req.set_atimeSec(UTIME_NOW);
+ req.set_atimeNsec(UTIME_NOW);
+ req.set_mtimeSec(UTIME_NOW);
+ req.set_mtimeNsec(UTIME_NOW);
+ }
+ req.set_flags(flags);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ }else if(resp.error() == managarm::posix::Errors::NO_SUCH_FD) {
+ return EBADF;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else if(resp.error() == managarm::posix::Errors::NOT_SUPPORTED) {
+ return ENOTSUP;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
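+// helGetRandomBytes() may return fewer bytes than requested, so keep calling it until the buffer is filled.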
+int sys_getentropy(void *buffer, size_t length) {
+ SignalGuard sguard;
+ auto p = reinterpret_cast<char *>(buffer);
+ size_t n = 0;
+
+ while(n < length) {
+ size_t chunk;
+ HEL_CHECK(helGetRandomBytes(p + n, length - n, &chunk));
+ n+= chunk;
+ }
+
+ return 0;
+}
+
+int sys_gethostname(char *buffer, size_t bufsize) {
+ SignalGuard sguard;
+ mlibc::infoLogger() << "mlibc: gethostname always returns managarm" << frg::endlog;
+ const char name[] = "managarm";
+ if(bufsize < sizeof(name))
+ return ENAMETOOLONG;
+ strncpy(buffer, name, sizeof(name));
+ return 0;
+}
+
+int sys_fsync(int) {
+ mlibc::infoLogger() << "mlibc: fsync is a stub" << frg::endlog;
+ return 0;
+}
+
+int sys_memfd_create(const char *name, int flags, int *fd) {
+ SignalGuard sguard;
+
+ managarm::posix::MemFdCreateRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_name(frg::string<MemoryAllocator>(getSysdepsAllocator(), name));
+ req.set_flags(flags);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }
+
+ *fd = resp.fd();
+
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+}
+
+int sys_uname(struct utsname *buf) {
+ __ensure(buf);
+ mlibc::infoLogger() << "\e[31mmlibc: uname() returns static information\e[39m" << frg::endlog;
+ strcpy(buf->sysname, "Managarm");
+ strcpy(buf->nodename, "managarm");
+ strcpy(buf->release, "0.0.1-rolling");
+ strcpy(buf->version, "Managarm is not Managram");
+#if defined(__x86_64__)
+ strcpy(buf->machine, "x86_64");
+#elif defined (__aarch64__)
+ strcpy(buf->machine, "aarch64");
+#else
+# error Unknown architecture
+#endif
+
+ return 0;
+}
+
+int sys_madvise(void *, size_t, int) {
+ mlibc::infoLogger() << "mlibc: sys_madvise is a stub!" << frg::endlog;
+ return 0;
+}
+
+int sys_ptsname(int fd, char *buffer, size_t length) {
+ int index;
+ if(int e = sys_ioctl(fd, TIOCGPTN, &index, NULL); e)
+ return e;
+ if((size_t)snprintf(buffer, length, "/dev/pts/%d", index) >= length) {
+ return ERANGE;
+ }
+ return 0;
+}
+
+int sys_unlockpt(int fd) {
+ int unlock = 0;
+
+ if(int e = sys_ioctl(fd, TIOCSPTLCK, &unlock, NULL); e)
+ return e;
+
+ return 0;
+}
+
+} //namespace mlibc
+
diff --git a/lib/mlibc/sysdeps/managarm/generic/fork-exec.cpp b/lib/mlibc/sysdeps/managarm/generic/fork-exec.cpp
new file mode 100644
index 0000000..8da0e1e
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/fork-exec.cpp
@@ -0,0 +1,744 @@
+
+// for _Exit()
+#include <stdlib.h>
+
+#include <string.h>
+#include <errno.h>
+
+// for fork() and execve()
+#include <unistd.h>
+// for sched_yield()
+#include <sched.h>
+#include <stdio.h>
+// for getrusage()
+#include <sys/resource.h>
+// for waitpid()
+#include <sys/wait.h>
+#include <pthread.h>
+
+#include <bits/ensure.h>
+#include <mlibc/allocator.hpp>
+#include <mlibc/debug.hpp>
+#include <mlibc/posix-pipe.hpp>
+#include <mlibc/thread-entry.hpp>
+#include <mlibc/all-sysdeps.hpp>
+#include <posix.frigg_bragi.hpp>
+#include <protocols/posix/supercalls.hpp>
+
+namespace mlibc {
+
+int sys_futex_tid() {
+ HelWord tid = 0;
+ HEL_CHECK(helSyscall0_1(kHelCallSuper + posix::superGetTid,
+ &tid));
+
+ return tid;
+}
+
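+// helFutexWait() expects the timeout as a plain nanosecond count; -1 means wait without a timeout.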
+int sys_futex_wait(int *pointer, int expected, const struct timespec *time) {
+ // This implementation is inherently signal-safe.
+ if(time) {
+ if(helFutexWait(pointer, expected, time->tv_nsec + time->tv_sec * 1000000000))
+ return -1;
+ return 0;
+ }
+ if(helFutexWait(pointer, expected, -1))
+ return -1;
+ return 0;
+}
+
+int sys_futex_wake(int *pointer) {
+ // This implementation is inherently signal-safe.
+ if(helFutexWake(pointer))
+ return -1;
+ return 0;
+}
+
+int sys_waitpid(pid_t pid, int *status, int flags, struct rusage *ru, pid_t *ret_pid) {
+ if(ru) {
+ mlibc::infoLogger() << "mlibc: struct rusage in sys_waitpid is unsupported" << frg::endlog;
+ return ENOSYS;
+ }
+
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::WAIT);
+ req.set_pid(pid);
+ req.set_flags(flags);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ if(status)
+ *status = resp.mode();
+ *ret_pid = resp.pid();
+ return 0;
+}
+
+int sys_waitid(idtype_t idtype, id_t id, siginfo_t *info, int options) {
+ SignalGuard sguard;
+
+ managarm::posix::WaitIdRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ req.set_idtype(idtype);
+ req.set_id(id);
+ req.set_flags(options);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::WaitIdResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ info->si_pid = resp.pid();
+ info->si_uid = resp.uid();
+ info->si_signo = SIGCHLD;
+ info->si_status = resp.sig_status();
+ info->si_code = resp.sig_code();
+ return 0;
+}
+
+void sys_exit(int status) {
+ // This implementation is inherently signal-safe.
+ HEL_CHECK(helSyscall1(kHelCallSuper + posix::superExit, status));
+ __builtin_trap();
+}
+
+void sys_yield() {
+ // This implementation is inherently signal-safe.
+ HEL_CHECK(helYield());
+}
+
+int sys_sleep(time_t *secs, long *nanos) {
+ SignalGuard sguard;
+ globalQueue.trim();
+
+ uint64_t now;
+ HEL_CHECK(helGetClock(&now));
+
+ uint64_t async_id;
+ HEL_CHECK(helSubmitAwaitClock(now + uint64_t(*secs) * 1000000000 + uint64_t(*nanos),
+ globalQueue.getQueue(), 0, &async_id));
+
+ auto element = globalQueue.dequeueSingle();
+ auto result = parseSimple(element);
+ HEL_CHECK(result->error);
+
+ *secs = 0;
+ *nanos = 0;
+
+ return 0;
+}
+
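+// All signals are blocked around the fork supercall; the child (out == 0) clears cached state and re-creates its IPC queue before the old mask is restored.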
+int sys_fork(pid_t *child) {
+ // This implementation is inherently signal-safe.
+ int res;
+
+ sigset_t full_sigset;
+ res = sigfillset(&full_sigset);
+ __ensure(!res);
+
+ sigset_t former_sigset;
+ res = sigprocmask(SIG_SETMASK, &full_sigset, &former_sigset);
+ __ensure(!res);
+
+ HelWord out;
+ HEL_CHECK(helSyscall0_1(kHelCallSuper + posix::superFork, &out));
+ *child = out;
+
+ if(!out) {
+ clearCachedInfos();
+ globalQueue.recreateQueue();
+ }
+
+ res = sigprocmask(SIG_SETMASK, &former_sigset, nullptr);
+ __ensure(!res);
+
+ return 0;
+}
+
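+// argv and envp are flattened into NUL-separated byte areas so that the whole environment can be handed to the supercall in two buffers.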
+int sys_execve(const char *path, char *const argv[], char *const envp[]) {
+ // TODO: Make this function signal-safe!
+ frg::string<MemoryAllocator> args_area(getSysdepsAllocator());
+ for(auto it = argv; *it; ++it)
+ args_area += frg::string_view{*it, strlen(*it) + 1};
+
+ frg::string<MemoryAllocator> env_area(getSysdepsAllocator());
+ for(auto it = envp; *it; ++it)
+ env_area += frg::string_view{*it, strlen(*it) + 1};
+
+ uintptr_t out;
+
+ HEL_CHECK(helSyscall6_1(kHelCallSuper + posix::superExecve,
+ reinterpret_cast<uintptr_t>(path),
+ strlen(path),
+ reinterpret_cast<uintptr_t>(args_area.data()),
+ args_area.size(),
+ reinterpret_cast<uintptr_t>(env_area.data()),
+ env_area.size(),
+ &out));
+
+ return out;
+}
+
+gid_t sys_getgid() {
+ SignalGuard sguard;
+
+ managarm::posix::GetGidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return resp.uid();
+}
+
+int sys_setgid(gid_t gid) {
+ SignalGuard sguard;
+
+ managarm::posix::SetGidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ req.set_uid(gid);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ACCESS_DENIED) {
+ return EPERM;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+gid_t sys_getegid() {
+ SignalGuard sguard;
+
+ managarm::posix::GetEgidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return resp.uid();
+}
+
+int sys_setegid(gid_t egid) {
+ SignalGuard sguard;
+
+ managarm::posix::SetEgidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ req.set_uid(egid);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ACCESS_DENIED) {
+ return EPERM;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+uid_t sys_getuid() {
+ SignalGuard sguard;
+
+ managarm::posix::GetUidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return resp.uid();
+}
+
+int sys_setuid(uid_t uid) {
+ SignalGuard sguard;
+
+ managarm::posix::SetUidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ req.set_uid(uid);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ACCESS_DENIED) {
+ return EPERM;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+uid_t sys_geteuid() {
+ SignalGuard sguard;
+
+ managarm::posix::GetEuidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return resp.uid();
+}
+
+int sys_seteuid(uid_t euid) {
+ SignalGuard sguard;
+
+ managarm::posix::SetEuidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ req.set_uid(euid);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::ACCESS_DENIED) {
+ return EPERM;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+pid_t sys_gettid() {
+ // TODO: use an actual gettid syscall.
+ return sys_getpid();
+}
+
+pid_t sys_getpid() {
+ SignalGuard sguard;
+
+ managarm::posix::GetPidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return resp.pid();
+}
+
+pid_t sys_getppid() {
+ SignalGuard sguard;
+
+ managarm::posix::GetPpidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return resp.pid();
+}
+
+int sys_getsid(pid_t pid, pid_t *sid) {
+ SignalGuard sguard;
+
+ managarm::posix::GetSidRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_pid(pid);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::NO_SUCH_RESOURCE) {
+ *sid = 0;
+ return ESRCH;
+ } else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *sid = resp.pid();
+ return 0;
+ }
+}
+
+int sys_getpgid(pid_t pid, pid_t *pgid) {
+ SignalGuard sguard;
+
+ managarm::posix::GetPgidRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_pid(pid);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::NO_SUCH_RESOURCE) {
+ *pgid = 0;
+ return ESRCH;
+ } else {
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *pgid = resp.pid();
+ return 0;
+ }
+}
+
+int sys_setpgid(pid_t pid, pid_t pgid) {
+ SignalGuard sguard;
+
+ managarm::posix::SetPgidRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ req.set_pid(pid);
+ req.set_pgid(pgid);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::posix::Errors::INSUFFICIENT_PERMISSION) {
+ return EPERM;
+ }else if(resp.error() == managarm::posix::Errors::NO_SUCH_RESOURCE) {
+ return ESRCH;
+ }else if(resp.error() == managarm::posix::Errors::ACCESS_DENIED) {
+ return EACCES;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+}
+
+int sys_getrusage(int scope, struct rusage *usage) {
+ memset(usage, 0, sizeof(struct rusage));
+
+ SignalGuard sguard;
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::GET_RESOURCE_USAGE);
+ req.set_mode(scope);
+
+ auto [offer, send_head, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+
+ usage->ru_utime.tv_sec = resp.ru_user_time() / 1'000'000'000;
+ usage->ru_utime.tv_usec = (resp.ru_user_time() % 1'000'000'000) / 1'000;
+
+ return 0;
+}
+
+int sys_getschedparam(void *tcb, int *policy, struct sched_param *param) {
+ if(tcb != mlibc::get_current_tcb()) {
+ return ESRCH;
+ }
+
+ *policy = SCHED_OTHER;
+ int prio = 0;
+ // TODO(no92): use helGetPriority(kHelThisThread) here
+ mlibc::infoLogger() << "\e[31mmlibc: sys_getschedparam always returns priority 0\e[39m" << frg::endlog;
+ param->sched_priority = prio;
+
+ return 0;
+}
+
+int sys_setschedparam(void *tcb, int policy, const struct sched_param *param) {
+ if(tcb != mlibc::get_current_tcb()) {
+ return ESRCH;
+ }
+
+ if(policy != SCHED_OTHER) {
+ return EINVAL;
+ }
+
+ HEL_CHECK(helSetPriority(kHelThisThread, param->sched_priority));
+
+ return 0;
+}
+
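+// The clone supercall starts the new thread at __mlibc_start_thread on the given stack and returns the new thread's id.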
+int sys_clone(void *tcb, pid_t *pid_out, void *stack) {
+ HelWord pid = 0;
+ HEL_CHECK(helSyscall2_1(kHelCallSuper + posix::superClone,
+ reinterpret_cast<HelWord>(__mlibc_start_thread),
+ reinterpret_cast<HelWord>(stack),
+ &pid));
+
+ if (pid_out)
+ *pid_out = pid;
+
+ return 0;
+}
+
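+// On AArch64 the thread pointer is offset so that tpidr_el0 points sizeof(Tcb) - 0x10 past the TCB base; otherwise the FS base is written via helWriteFsBase().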
+int sys_tcb_set(void *pointer) {
+#if defined(__aarch64__)
+ uintptr_t addr = reinterpret_cast<uintptr_t>(pointer);
+ addr += sizeof(Tcb) - 0x10;
+ asm volatile ("msr tpidr_el0, %0" :: "r"(addr));
+#else
+ HEL_CHECK(helWriteFsBase(pointer));
+#endif
+ return 0;
+}
+
+void sys_thread_exit() {
+ // This implementation is inherently signal-safe.
+ HEL_CHECK(helSyscall1(kHelCallSuper + posix::superExit, 0));
+ __builtin_trap();
+}
+
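+// Thread names are limited to 15 characters and are set by writing to /proc/self/task/<tid>/comm, mirroring the Linux interface.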
+int sys_thread_setname(void *tcb, const char *name) {
+ if(strlen(name) > 15) {
+ return ERANGE;
+ }
+
+ auto t = reinterpret_cast<Tcb *>(tcb);
+ char *path;
+ int cs = 0;
+
+ if(asprintf(&path, "/proc/self/task/%d/comm", t->tid) < 0) {
+ return ENOMEM;
+ }
+
+ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
+
+ int fd;
+ if(int e = sys_open(path, O_WRONLY, 0, &fd); e) {
+ free(path);
+ return e;
+ }
+ // The path buffer from asprintf() is no longer needed once the file is open.
+ free(path);
+
+ if(int e = sys_write(fd, name, strlen(name) + 1, NULL)) {
+ sys_close(fd);
+ return e;
+ }
+
+ sys_close(fd);
+
+ pthread_setcancelstate(cs, 0);
+
+ return 0;
+}
+
+int sys_thread_getname(void *tcb, char *name, size_t size) {
+ auto t = reinterpret_cast<Tcb *>(tcb);
+ char *path;
+ int cs = 0;
+ ssize_t real_size = 0;
+
+ if(asprintf(&path, "/proc/self/task/%d/comm", t->tid) < 0) {
+ return ENOMEM;
+ }
+
+ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
+
+ int fd;
+ if(int e = sys_open(path, O_RDONLY | O_CLOEXEC, 0, &fd); e) {
+ free(path);
+ return e;
+ }
+ free(path);
+
+ if(int e = sys_read(fd, name, size, &real_size)) {
+ sys_close(fd);
+ return e;
+ }
+
+ // Terminate the name, dropping the last byte that was read.
+ if(real_size > 0)
+ name[real_size - 1] = 0;
+ sys_close(fd);
+
+ pthread_setcancelstate(cs, 0);
+
+ if(static_cast<ssize_t>(size) <= real_size) {
+ return ERANGE;
+ }
+
+ return 0;
+}
+
+
+} //namespace mlibc
+
diff --git a/lib/mlibc/sysdeps/managarm/generic/ioctl.cpp b/lib/mlibc/sysdeps/managarm/generic/ioctl.cpp
new file mode 100644
index 0000000..384c09e
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/ioctl.cpp
@@ -0,0 +1,708 @@
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/cdrom.h>
+#include <linux/input.h>
+#include <linux/kd.h>
+#include <linux/vt.h>
+#include <sys/ioctl.h>
+
+#include <bits/ensure.h>
+#include <mlibc/all-sysdeps.hpp>
+#include <mlibc/allocator.hpp>
+#include <mlibc/debug.hpp>
+#include <mlibc/posix-pipe.hpp>
+
+#include <fs.frigg_bragi.hpp>
+#include <posix.frigg_bragi.hpp>
+
+namespace mlibc {
+
+static constexpr bool logIoctls = false;
+
+int ioctl_drm(int fd, unsigned long request, void *arg, int *result, HelHandle handle);
+
+int sys_ioctl(int fd, unsigned long request, void *arg, int *result) {
+ if(logIoctls)
+ mlibc::infoLogger() << "mlibc: ioctl with"
+ << " type: 0x" << frg::hex_fmt(_IOC_TYPE(request))
+ << ", number: 0x" << frg::hex_fmt(_IOC_NR(request))
+ << " (raw request: " << frg::hex_fmt(request) << ")"
+ << " on fd " << fd << frg::endlog;
+
+ SignalGuard sguard;
+ auto handle = getHandleForFd(fd);
+ if(!handle)
+ return EBADF;
+
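+ // DRM ioctls all use the 'd' type and are handled separately in drm.cpp.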
+ if(_IOC_TYPE(request) == 'd') {
+ return ioctl_drm(fd, request, arg, result, handle);
+ }
+
+ managarm::fs::IoctlRequest<MemoryAllocator> ioctl_req(getSysdepsAllocator());
+
+ switch(request) {
+ case FIONBIO: {
+ auto mode = reinterpret_cast<int *>(arg);
+ int flags = fcntl(fd, F_GETFL, 0);
+ if(*mode) {
+ fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+ }else{
+ fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
+ }
+ return 0;
+ }
+ case FIONREAD: {
+ auto argp = reinterpret_cast<int *>(arg);
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ if(!argp)
+ return EINVAL;
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(FIONREAD);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] =
+ exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::NOT_CONNECTED) {
+ return ENOTCONN;
+ } else {
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ *argp = resp.fionread_count();
+
+ return 0;
+ }
+ }
+ case FIOCLEX: {
+ managarm::posix::IoctlFioclexRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(fd);
+
+ auto [offer, sendReq, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ if(recvResp.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+ }
+ case TCGETS: {
+ auto param = reinterpret_cast<struct termios *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp, recv_attrs] = exchangeMsgsSync(handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(param, sizeof(struct termios))
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+ HEL_CHECK(recv_attrs.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ __ensure(recv_attrs.actualLength() == sizeof(struct termios));
+ *result = resp.result();
+ return 0;
+ }
+ case TCSETS: {
+ auto param = reinterpret_cast<struct termios *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ auto [offer, send_ioctl_req, send_req, send_attrs, recv_resp] = exchangeMsgsSync(handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::sendBuffer(param, sizeof(struct termios)),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(send_attrs.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ if(result)
+ *result = resp.result();
+ return 0;
+ }
+ case TIOCSCTTY: {
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ auto [offer, send_ioctl_req, send_req, imbue_creds, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::imbueCredentials(),
+ helix_ng::recvInline())
+ );
+
+ HEL_CHECK(offer.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_ioctl_req.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }else if(resp.error() == managarm::fs::Errors::INSUFFICIENT_PERMISSIONS) {
+ return EPERM;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = resp.result();
+ return 0;
+ }
+ case TIOCGWINSZ: {
+ auto param = reinterpret_cast<struct winsize *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET)
+ return EINVAL;
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ *result = resp.result();
+ param->ws_col = resp.pts_width();
+ param->ws_row = resp.pts_height();
+ param->ws_xpixel = resp.pts_pixel_width();
+ param->ws_ypixel = resp.pts_pixel_height();
+ return 0;
+ }
+ case TIOCSWINSZ: {
+ auto param = reinterpret_cast<const struct winsize *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_pts_width(param->ws_col);
+ req.set_pts_height(param->ws_row);
+ req.set_pts_pixel_width(param->ws_xpixel);
+ req.set_pts_pixel_height(param->ws_ypixel);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ *result = resp.result();
+ return 0;
+ }
+ case TIOCGPTN: {
+ auto param = reinterpret_cast<int *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *param = resp.pts_index();
+ if(result)
+ *result = resp.result();
+ return 0;
+ }
+ case TIOCGPGRP: {
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, send_ioctl_req, send_req, imbue_creds, recv_resp] =
+ exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::imbueCredentials(),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error())
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::NOT_A_TERMINAL) {
+ return ENOTTY;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = resp.result();
+ *static_cast<int *>(arg) = resp.pid();
+ return 0;
+ }
+ case TIOCSPGRP: {
+ auto param = reinterpret_cast<int *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_pgid(*param);
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, send_ioctl_req, send_req, imbue_creds, recv_resp] =
+ exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::imbueCredentials(),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::INSUFFICIENT_PERMISSIONS) {
+ return EPERM;
+ } else if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = resp.result();
+ return 0;
+ }
+ case TIOCGSID: {
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, send_ioctl_req, send_req, imbue_creds, recv_resp] =
+ exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::imbueCredentials(),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ if(send_ioctl_req.error())
+ return EINVAL;
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error())
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ if(imbue_creds.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::NOT_A_TERMINAL) {
+ return ENOTTY;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = resp.result();
+ *static_cast<int *>(arg) = resp.pid();
+ return 0;
+ }
+ case CDROM_GET_CAPABILITY: {
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] =
+ exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ if(send_ioctl_req.error())
+ return EINVAL;
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error())
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::NOT_A_TERMINAL) {
+ return ENOTTY;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = resp.result();
+ return 0;
+ }
+ } // end of switch()
+
+
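+ // Requests that fall through the switch are mostly evdev ('E') ioctls: simple queries are answered locally, the rest are forwarded to the device.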
+ if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCGVERSION)) {
+ *reinterpret_cast<int *>(arg) = 0x010001; // should be EV_VERSION
+ *result = 0;
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCGID)) {
+ memset(arg, 0, sizeof(struct input_id));
+ *result = 0;
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCGNAME(0))) {
+ const char *s = "Managarm generic evdev";
+ auto chunk = frg::min(_IOC_SIZE(request), strlen(s) + 1);
+ memcpy(arg, s, chunk);
+ *result = chunk;
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCGPHYS(0))) {
+ // Returns the sysfs path of the device.
+ const char *s = "input0";
+ auto chunk = frg::min(_IOC_SIZE(request), strlen(s) + 1);
+ memcpy(arg, s, chunk);
+ *result = chunk;
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCGUNIQ(0))) {
+ // Returns a unique ID for the device.
+ const char *s = "0";
+ auto chunk = frg::min(_IOC_SIZE(request), strlen(s) + 1);
+ memcpy(arg, s, chunk);
+ *result = chunk;
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCGPROP(0))) {
+ // Returns a bitmask of properties of the device.
+ auto size = _IOC_SIZE(request);
+ memset(arg, 0, size);
+ *result = size;
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCGKEY(0))) {
+ // Returns the current key state.
+ auto size = _IOC_SIZE(request);
+ memset(arg, 0, size);
+ *result = size;
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCGLED(0))) {
+ // Returns the current LED state.
+ auto size = _IOC_SIZE(request);
+ memset(arg, 0, size);
+ *result = size;
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCGSW(0))) {
+ auto size = _IOC_SIZE(request);
+ memset(arg, 0, size);
+ *result = size;
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) >= _IOC_NR(EVIOCGBIT(0, 0))
+ && _IOC_NR(request) <= _IOC_NR(EVIOCGBIT(EV_MAX, 0))) {
+ // Returns a bitmask of capabilities of the device.
+ // If type is zero, return a mask of supported types.
+ // As EV_SYN is zero, this implies that it is impossible
+ // to get the mask of supported synthetic events.
+ auto type = _IOC_NR(request) - _IOC_NR(EVIOCGBIT(0, 0));
+ if(!type) {
+ // TODO: Check with the Linux ABI if we have to do this.
+ memset(arg, 0, _IOC_SIZE(request));
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(EVIOCGBIT(0, 0));
+ req.set_size(_IOC_SIZE(request));
+
+ auto [offer, send_ioctl_req, send_req, recv_resp, recv_data] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(arg, _IOC_SIZE(request)))
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+ HEL_CHECK(recv_data.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = recv_data.actualLength();
+ return 0;
+ }else{
+ // TODO: Check with the Linux ABI if we have to do this.
+ memset(arg, 0, _IOC_SIZE(request));
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(EVIOCGBIT(1, 0));
+ req.set_input_type(type);
+ req.set_size(_IOC_SIZE(request));
+
+ auto [offer, send_ioctl_req, send_req, recv_resp, recv_data] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(arg, _IOC_SIZE(request)))
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+ HEL_CHECK(recv_data.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = recv_data.actualLength();
+ return 0;
+ }
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) == _IOC_NR(EVIOCSCLOCKID)) {
+ auto param = reinterpret_cast<int *>(arg);
+
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(request);
+ req.set_input_clock(*param);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *result = resp.result();
+ return 0;
+ }else if(_IOC_TYPE(request) == 'E'
+ && _IOC_NR(request) >= _IOC_NR(EVIOCGABS(0))
+ && _IOC_NR(request) <= _IOC_NR(EVIOCGABS(ABS_MAX))) {
+ auto param = reinterpret_cast<struct input_absinfo *>(arg);
+
+ auto type = _IOC_NR(request) - _IOC_NR(EVIOCGABS(0));
+ managarm::fs::GenericIoctlRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_command(EVIOCGABS(0));
+ req.set_input_type(type);
+
+ auto [offer, send_ioctl_req, send_req, recv_resp] = exchangeMsgsSync(handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(ioctl_req, getSysdepsAllocator()),
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_ioctl_req.error());
+ if(send_req.error() == kHelErrDismissed)
+ return EINVAL;
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::GenericIoctlReply<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ param->value = resp.input_value();
+ param->minimum = resp.input_min();
+ param->maximum = resp.input_max();
+ param->fuzz = resp.input_fuzz();
+ param->flat = resp.input_flat();
+ param->resolution = resp.input_resolution();
+
+ *result = resp.result();
+ return 0;
+ }else if(request == KDSETMODE) {
+ auto param = reinterpret_cast<unsigned int *>(arg);
+ mlibc::infoLogger() << "\e[35mmlibc: KD_SETMODE(" << frg::hex_fmt(param) << ") is a no-op" << frg::endlog;
+
+ *result = 0;
+ return 0;
+ }else if(request == KDGETMODE) {
+ auto param = reinterpret_cast<unsigned int *>(arg);
+ mlibc::infoLogger() << "\e[35mmlibc: KD_GETMODE is a no-op" << frg::endlog;
+ *param = 0;
+
+ *result = 0;
+ return 0;
+ }else if(request == KDSKBMODE) {
+ auto param = reinterpret_cast<long>(arg);
+ mlibc::infoLogger() << "\e[35mmlibc: KD_SKBMODE(" << frg::hex_fmt(param) << ") is a no-op" << frg::endlog;
+
+ *result = 0;
+ return 0;
+ }else if(request == VT_SETMODE) {
+ // auto param = reinterpret_cast<struct vt_mode *>(arg);
+ mlibc::infoLogger() << "\e[35mmlibc: VT_SETMODE is a no-op" << frg::endlog;
+
+ *result = 0;
+ return 0;
+ }else if(request == VT_GETSTATE) {
+ auto param = reinterpret_cast<struct vt_stat *>(arg);
+
+ param->v_active = 0;
+ param->v_signal = 0;
+ param->v_state = 0;
+
+ mlibc::infoLogger() << "\e[35mmlibc: VT_GETSTATE is a no-op" << frg::endlog;
+
+ *result = 0;
+ return 0;
+ }else if(request == VT_ACTIVATE || request == VT_WAITACTIVE) {
+ mlibc::infoLogger() << "\e[35mmlibc: VT_ACTIVATE/VT_WAITACTIVE are no-ops" << frg::endlog;
+ *result = 0;
+ return 0;
+ }else if(request == TIOCSPTLCK) {
+ mlibc::infoLogger() << "\e[35mmlibc: TIOCSPTLCK is a no-op" << frg::endlog;
+ if(result)
+ *result = 0;
+ return 0;
+ }
+
+ mlibc::infoLogger() << "mlibc: Unexpected ioctl with"
+ << " type: 0x" << frg::hex_fmt(_IOC_TYPE(request))
+ << ", number: 0x" << frg::hex_fmt(_IOC_NR(request))
+ << " (raw request: " << frg::hex_fmt(request) << ")" << frg::endlog;
+ __ensure(!"Illegal ioctl request");
+ __builtin_unreachable();
+}
+
+} //namespace mlibc
diff --git a/lib/mlibc/sysdeps/managarm/generic/memory.cpp b/lib/mlibc/sysdeps/managarm/generic/memory.cpp
new file mode 100644
index 0000000..91e47af
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/memory.cpp
@@ -0,0 +1,30 @@
+
+#include <string.h>
+
+#include <bits/ensure.h>
+#include <mlibc/allocator.hpp>
+#include <mlibc/all-sysdeps.hpp>
+#include <protocols/posix/supercalls.hpp>
+
+#include <hel.h>
+#include <hel-syscalls.h>
+
+namespace mlibc {
+
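+// Anonymous memory is managed through posix supercalls; sizes must be page-aligned (multiples of 0x1000).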
+int sys_anon_allocate(size_t size, void **pointer) {
+ // This implementation is inherently signal-safe.
+ __ensure(!(size & 0xFFF));
+ HelWord out;
+ HEL_CHECK(helSyscall1_1(kHelCallSuper + posix::superAnonAllocate, size, &out));
+ *pointer = reinterpret_cast<void *>(out);
+ return 0;
+}
+
+int sys_anon_free(void *pointer, size_t size) {
+ // This implementation is inherently signal-safe.
+ HEL_CHECK(helSyscall2(kHelCallSuper + posix::superAnonDeallocate, (HelWord)pointer, size));
+ return 0;
+}
+
+} //namespace mlibc
+
diff --git a/lib/mlibc/sysdeps/managarm/generic/mount.cpp b/lib/mlibc/sysdeps/managarm/generic/mount.cpp
new file mode 100644
index 0000000..5677b20
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/mount.cpp
@@ -0,0 +1,44 @@
+#include <errno.h>
+#include <string.h>
+#include <sys/mount.h>
+
+#include <bits/ensure.h>
+#include <mlibc/allocator.hpp>
+#include <mlibc/posix-pipe.hpp>
+#include <mlibc/all-sysdeps.hpp>
+#include <posix.frigg_bragi.hpp>
+#include <bragi/helpers-frigg.hpp>
+
+namespace mlibc {
+
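+// The mount flags and data arguments are currently ignored; only source, target and fstype are forwarded to the POSIX server.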
+int sys_mount(const char *source, const char *target,
+ const char *fstype, unsigned long, const void *) {
+ SignalGuard sguard;
+
+ managarm::posix::MountRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), source));
+ req.set_target_path(frg::string<MemoryAllocator>(getSysdepsAllocator(), target));
+ req.set_fs_type(frg::string<MemoryAllocator>(getSysdepsAllocator(), fstype));
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ auto resp = *bragi::parse_head_only<managarm::posix::SvrResponse>(recv_resp, getSysdepsAllocator());
+ if(resp.error() == managarm::posix::Errors::FILE_NOT_FOUND)
+ return ENOENT;
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ return 0;
+}
+
+} //namespace mlibc
diff --git a/lib/mlibc/sysdeps/managarm/generic/net.cpp b/lib/mlibc/sysdeps/managarm/generic/net.cpp
new file mode 100644
index 0000000..63f2d0c
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/net.cpp
@@ -0,0 +1,57 @@
+#include <mlibc/all-sysdeps.hpp>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+
+namespace mlibc {
+
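+// Both helpers open a temporary AF_UNIX datagram socket purely to issue the SIOCGIFNAME/SIOCGIFINDEX ioctls.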
+int sys_if_indextoname(unsigned int index, char *name) {
+ int fd = 0;
+ int r = sys_socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, AF_UNSPEC, &fd);
+
+ if(r)
+ return r;
+
+ struct ifreq ifr;
+ ifr.ifr_ifindex = index;
+
+ int res = 0;
+ int ret = sys_ioctl(fd, SIOCGIFNAME, &ifr, &res);
+ close(fd);
+
+ if(ret) {
+ if(ret == ENODEV)
+ return ENXIO;
+ return ret;
+ }
+
+ strncpy(name, ifr.ifr_name, IF_NAMESIZE);
+
+ return 0;
+}
+
+int sys_if_nametoindex(const char *name, unsigned int *ret) {
+ int fd = 0;
+ int r = sys_socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, AF_UNSPEC, &fd);
+
+ if(r)
+ return r;
+
+ struct ifreq ifr;
+ strncpy(ifr.ifr_name, name, sizeof ifr.ifr_name);
+
+ int res = 0;
+ r = sys_ioctl(fd, SIOCGIFINDEX, &ifr, &res);
+ close(fd);
+
+ if(r)
+ return r;
+
+ *ret = ifr.ifr_ifindex;
+
+ return 0;
+}
+
+} //namespace mlibc
diff --git a/lib/mlibc/sysdeps/managarm/generic/sched.cpp b/lib/mlibc/sysdeps/managarm/generic/sched.cpp
new file mode 100644
index 0000000..bce8db8
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/sched.cpp
@@ -0,0 +1,102 @@
+#include <bits/ensure.h>
+#include <unistd.h>
+
+#include <hel.h>
+#include <hel-syscalls.h>
+#include <mlibc/debug.hpp>
+#include <mlibc/allocator.hpp>
+#include <mlibc/posix-pipe.hpp>
+#include <mlibc/posix-sysdeps.hpp>
+
+#include <posix.frigg_bragi.hpp>
+
+namespace mlibc {
+
+int sys_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) {
+ return sys_getthreadaffinity(pid, cpusetsize, mask);
+}
+
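+// The affinity mask is received directly into the caller's cpu_set_t buffer (at most cpusetsize bytes).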
+int sys_getthreadaffinity(pid_t tid, size_t cpusetsize, cpu_set_t *mask) {
+ SignalGuard sguard;
+
+ managarm::posix::GetAffinityRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ req.set_pid(tid);
+ req.set_size(cpusetsize);
+
+ auto [offer, send_head, recv_resp, recv_data] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(mask, cpusetsize)
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ } else if(resp.error() != managarm::posix::Errors::SUCCESS) {
+ mlibc::infoLogger() << "mlibc: got unexpected error from posix in sys_getaffinity!" << frg::endlog;
+ return EIEIO;
+ }
+ HEL_CHECK(recv_data.error());
+
+ return 0;
+}
+
+int sys_setaffinity(pid_t pid, size_t cpusetsize, const cpu_set_t *mask) {
+ return sys_setthreadaffinity(pid, cpusetsize, mask);
+}
+
+int sys_setthreadaffinity(pid_t tid, size_t cpusetsize, const cpu_set_t *mask) {
+ SignalGuard sguard;
+
+ frg::vector<uint8_t, MemoryAllocator> affinity_mask(getSysdepsAllocator());
+ affinity_mask.resize(cpusetsize);
+ memcpy(affinity_mask.data(), mask, cpusetsize);
+ managarm::posix::SetAffinityRequest<MemoryAllocator> req(getSysdepsAllocator());
+
+ req.set_pid(tid);
+ req.set_mask(affinity_mask);
+
+ auto [offer, send_head, send_tail, recv_resp] =
+ exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadTail(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_head.error());
+ HEL_CHECK(send_tail.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ } else if(resp.error() != managarm::posix::Errors::SUCCESS) {
+ mlibc::infoLogger() << "mlibc: got unexpected error from posix in sys_setaffinity!" << frg::endlog;
+ return EIEIO;
+ }
+
+ return 0;
+}
+
+int sys_getcpu(int *cpu) {
+ HEL_CHECK(helGetCurrentCpu(cpu));
+ return 0;
+}
+
+} //namespace mlibc
diff --git a/lib/mlibc/sysdeps/managarm/generic/signals.cpp b/lib/mlibc/sysdeps/managarm/generic/signals.cpp
new file mode 100644
index 0000000..486f3d4
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/signals.cpp
@@ -0,0 +1,139 @@
+#include <bits/ensure.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+
+#include <hel.h>
+#include <hel-syscalls.h>
+
+#include <mlibc/debug.hpp>
+#include <mlibc/allocator.hpp>
+#include <mlibc/posix-pipe.hpp>
+#include <mlibc/all-sysdeps.hpp>
+#include <posix.frigg_bragi.hpp>
+
+#include <bragi/helpers-frigg.hpp>
+#include <helix/ipc-structs.hpp>
+
+#include <protocols/posix/supercalls.hpp>
+
+extern "C" void __mlibc_signal_restore();
+
+namespace mlibc {
+
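+// The signal mask is manipulated through a supercall: `how` and the new mask
+// are passed as two machine words and the previous mask comes back in the
+// first result word. A null `set` passes (0, 0) and is effectively a
+// read-only query. Usage sketch, assuming the standard sigset helpers:
+//
+//     sigset_t block, old;
+//     sigemptyset(&block);
+//     sigaddset(&block, SIGUSR1);
+//     mlibc::sys_sigprocmask(SIG_BLOCK, &block, &old);
+//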
+int sys_sigprocmask(int how, const sigset_t *set, sigset_t *retrieve) {
+ // This implementation is inherently signal-safe.
+ uint64_t former, unused;
+ if(set) {
+ HEL_CHECK(helSyscall2_2(kHelObserveSuperCall + posix::superSigMask, how, *set, &former, &unused));
+ }else{
+ HEL_CHECK(helSyscall2_2(kHelObserveSuperCall + posix::superSigMask, 0, 0, &former, &unused));
+ }
+ if(retrieve)
+ *retrieve = former;
+ return 0;
+}
+
+int sys_sigaction(int number, const struct sigaction *__restrict action,
+ struct sigaction *__restrict saved_action) {
+ SignalGuard sguard;
+
+ // TODO: Respect restorer. __ensure(!(action->sa_flags & SA_RESTORER));
+
+ managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_request_type(managarm::posix::CntReqType::SIG_ACTION);
+ req.set_sig_number(number);
+ if(action) {
+ req.set_mode(1);
+ req.set_flags(action->sa_flags);
+ req.set_sig_mask(action->sa_mask);
+ if(action->sa_flags & SA_SIGINFO) {
+ req.set_sig_handler(reinterpret_cast<uintptr_t>(action->sa_sigaction));
+ }else{
+ req.set_sig_handler(reinterpret_cast<uintptr_t>(action->sa_handler));
+ }
+ req.set_sig_restorer(reinterpret_cast<uintptr_t>(&__mlibc_signal_restore));
+ } else {
+ req.set_mode(0);
+ }
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+
+ if(resp.error() == managarm::posix::Errors::ILLEGAL_REQUEST) {
+ // This is only returned for servers, not for normal userspace.
+ return ENOSYS;
+ }else if(resp.error() == managarm::posix::Errors::ILLEGAL_ARGUMENTS) {
+ return EINVAL;
+ }
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+
+ if(saved_action) {
+ saved_action->sa_flags = resp.flags();
+ saved_action->sa_mask = resp.sig_mask();
+ if(resp.flags() & SA_SIGINFO) {
+ saved_action->sa_sigaction =
+ reinterpret_cast<void (*)(int, siginfo_t *, void *)>(resp.sig_handler());
+ }else{
+ saved_action->sa_handler = reinterpret_cast<void (*)(int)>(resp.sig_handler());
+ }
+ // TODO: saved_action->sa_restorer = resp.sig_restorer;
+ }
+ return 0;
+}
+
+int sys_kill(int pid, int number) {
+ // This implementation is inherently signal-safe.
+ HEL_CHECK(helSyscall2(kHelObserveSuperCall + posix::superSigKill, pid, number));
+ return 0;
+}
+
+int sys_tgkill(int, int tid, int number) {
+ return sys_kill(tid, number);
+}
+
+int sys_sigaltstack(const stack_t *ss, stack_t *oss) {
+ HelWord out;
+
+ // This implementation is inherently signal-safe.
+ HEL_CHECK(helSyscall2_1(kHelObserveSuperCall + posix::superSigAltStack,
+ reinterpret_cast<HelWord>(ss),
+ reinterpret_cast<HelWord>(oss),
+ &out));
+
+ return out;
+}
+
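+// POSIX requires sigsuspend() to return only after a handled signal, with
+// errno set to EINTR, hence the unconditional EINTR below. The sequence is:
+// install the temporary mask (obtaining a sequence number), suspend until the
+// kernel reports signal activity newer than that sequence (this is how the
+// superSigSuspend supercall is understood here), then restore the old mask.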
+int sys_sigsuspend(const sigset_t *set) {
+ //SignalGuard sguard;
+ uint64_t former, seq, unused;
+
+ HEL_CHECK(helSyscall2_2(kHelObserveSuperCall + posix::superSigMask, SIG_SETMASK, *set, &former, &seq));
+ HEL_CHECK(helSyscall1(kHelObserveSuperCall + posix::superSigSuspend, seq));
+ HEL_CHECK(helSyscall2_2(kHelObserveSuperCall + posix::superSigMask, SIG_SETMASK, former, &unused, &unused));
+
+ return EINTR;
+}
+
+int sys_sigpending(sigset_t *set) {
+ uint64_t pendingMask;
+
+ HEL_CHECK(helSyscall0_1(kHelObserveSuperCall + posix::superSigGetPending, &pendingMask));
+ *set = pendingMask;
+
+ return 0;
+}
+
+} //namespace mlibc
diff --git a/lib/mlibc/sysdeps/managarm/generic/socket.cpp b/lib/mlibc/sysdeps/managarm/generic/socket.cpp
new file mode 100644
index 0000000..5bdc1fc
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/socket.cpp
@@ -0,0 +1,423 @@
+
+#include <bits/ensure.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <sys/socket.h>
+#include <linux/netlink.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include <mlibc/allocator.hpp>
+#include <mlibc/debug.hpp>
+#include <mlibc/posix-pipe.hpp>
+#include <mlibc/all-sysdeps.hpp>
+#include <fs.frigg_bragi.hpp>
+#include <posix.frigg_bragi.hpp>
+
+namespace {
+
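+// Small varargs shim around the fcntl sysdep so that the SOCK_NONBLOCK and
+// SOCK_CLOEXEC emulation below can reuse it; the null check covers
+// configurations where sys_fcntl is not linked in, in which case ENOSYS is
+// reported.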
+int fcntl_helper(int fd, int request, int *result, ...) {
+ va_list args;
+ va_start(args, result);
+ if(!mlibc::sys_fcntl) {
+ return ENOSYS;
+ }
+ int ret = mlibc::sys_fcntl(fd, request, args, result);
+ va_end(args);
+ return ret;
+}
+
+}
+
+namespace mlibc {
+
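+// accept4()-style semantics: the POSIX server only hands back the new fd, so
+// the peer address (if requested) is fetched with a follow-up sys_peername()
+// call, and SOCK_NONBLOCK/SOCK_CLOEXEC are emulated afterwards via fcntl.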
+int sys_accept(int fd, int *newfd, struct sockaddr *addr_ptr, socklen_t *addr_length, int flags) {
+ SignalGuard sguard;
+
+ managarm::posix::AcceptRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_fd(fd);
+
+ auto [offer, sendReq, recvResp] = exchangeMsgsSync(
+ getPosixLane(),
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ HEL_CHECK(recvResp.error());
+
+ managarm::posix::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ if(resp.error() == managarm::posix::Errors::WOULD_BLOCK) {
+ return EWOULDBLOCK;
+ }else{
+ __ensure(resp.error() == managarm::posix::Errors::SUCCESS);
+ *newfd = resp.fd();
+ }
+
+	if(addr_ptr && addr_length) {
+		// Sysdeps report failures as positive errno values; propagate the
+		// error code directly instead of setting errno and returning -1.
+		if(int e = mlibc::sys_peername(*newfd, addr_ptr, *addr_length, addr_length); e)
+			return e;
+	}
+
+ if(flags & SOCK_NONBLOCK) {
+ int fcntl_ret = 0;
+ fcntl_helper(*newfd, F_GETFL, &fcntl_ret);
+ fcntl_helper(*newfd, F_SETFL, &fcntl_ret, fcntl_ret | O_NONBLOCK);
+ }
+
+ if(flags & SOCK_CLOEXEC) {
+ int fcntl_ret = 0;
+ fcntl_helper(*newfd, F_GETFD, &fcntl_ret);
+ fcntl_helper(*newfd, F_SETFD, &fcntl_ret, fcntl_ret | FD_CLOEXEC);
+ }
+
+ return 0;
+}
+
+int sys_bind(int fd, const struct sockaddr *addr_ptr, socklen_t addr_length) {
+ SignalGuard sguard;
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_BIND);
+
+ auto [offer, send_req, send_creds, send_buf, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::imbueCredentials(),
+ helix_ng::sendBuffer(addr_ptr, addr_length),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(send_creds.error());
+ HEL_CHECK(send_buf.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ } else if(resp.error() == managarm::fs::Errors::ADDRESS_IN_USE) {
+ return EADDRINUSE;
+ } else if(resp.error() == managarm::fs::Errors::ALREADY_EXISTS) {
+ return EINVAL;
+ } else if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ return EINVAL;
+ } else if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ } else if(resp.error() == managarm::fs::Errors::ACCESS_DENIED) {
+ return EACCES;
+ } else if(resp.error() == managarm::fs::Errors::ADDRESS_NOT_AVAILABLE) {
+ return EADDRNOTAVAIL;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ return 0;
+}
+
+int sys_connect(int fd, const struct sockaddr *addr_ptr, socklen_t addr_length) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_CONNECT);
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, send_req, imbue_creds, send_addr, recv_resp] =
+ exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::imbueCredentials(),
+ helix_ng::sendBuffer(const_cast<struct sockaddr *>(addr_ptr), addr_length),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(imbue_creds.error());
+ HEL_CHECK(send_addr.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::FILE_NOT_FOUND) {
+ return ENOENT;
+ } else if(resp.error() == managarm::fs::Errors::ILLEGAL_ARGUMENT) {
+ return EINVAL;
+ }
+
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ return 0;
+}
+
+int sys_sockname(int fd, struct sockaddr *addr_ptr, socklen_t max_addr_length,
+ socklen_t *actual_length) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_SOCKNAME);
+ req.set_fd(fd);
+ req.set_size(max_addr_length);
+
+ auto [offer, send_req, recv_resp, recv_addr] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(addr_ptr, max_addr_length))
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ return ENOTSOCK;
+ }
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ HEL_CHECK(recv_addr.error());
+ *actual_length = resp.file_size();
+ return 0;
+}
+
+int sys_peername(int fd, struct sockaddr *addr_ptr, socklen_t max_addr_length,
+ socklen_t *actual_length) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_PEERNAME);
+ req.set_fd(fd);
+ req.set_size(max_addr_length);
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, sendReq, recvResp, recvData] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::recvInline(),
+ helix_ng::recvBuffer(addr_ptr, max_addr_length)
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(sendReq.error());
+ if(recvResp.error() == kHelErrDismissed)
+ return ENOTSOCK;
+ HEL_CHECK(recvResp.error());
+ HEL_CHECK(recvData.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recvResp.data(), recvResp.length());
+ if(resp.error() == managarm::fs::Errors::ILLEGAL_OPERATION_TARGET) {
+ return ENOTSOCK;
+ }else if(resp.error() == managarm::fs::Errors::NOT_CONNECTED) {
+ return ENOTCONN;
+ }else{
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ *actual_length = resp.file_size();
+ return 0;
+ }
+}
+
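+// Only SO_PEERCRED is actually forwarded to the server; the remaining options
+// handled below return hardcoded placeholder values. Illustrative caller-side
+// usage for the supported case (standard getsockopt() shape):
+//
+//     struct ucred cred;
+//     socklen_t len = sizeof(cred);
+//     getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &len);
+//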
+int sys_getsockopt(int fd, int layer, int number,
+ void *__restrict buffer, socklen_t *__restrict size) {
+ SignalGuard sguard;
+
+ if(layer == SOL_SOCKET && number == SO_PEERCRED) {
+ if(*size != sizeof(struct ucred))
+ return EINVAL;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_GET_OPTION);
+ req.set_command(SO_PEERCRED);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+
+ struct ucred creds;
+ creds.pid = resp.pid();
+ creds.uid = resp.uid();
+ creds.gid = resp.gid();
+ memcpy(buffer, &creds, sizeof(struct ucred));
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_SNDBUF) {
+		mlibc::infoLogger() << "\e[31mmlibc: getsockopt() call with SOL_SOCKET and SO_SNDBUF is unimplemented, hardcoding 4096\e[39m" << frg::endlog;
+ *(int *)buffer = 4096;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_TYPE) {
+ mlibc::infoLogger() << "\e[31mmlibc: getsockopt() call with SOL_SOCKET and SO_TYPE is unimplemented, hardcoding SOCK_STREAM\e[39m" << frg::endlog;
+ *(int *)buffer = SOCK_STREAM;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_ERROR) {
+ mlibc::infoLogger() << "\e[31mmlibc: getsockopt() call with SOL_SOCKET and SO_ERROR is unimplemented, hardcoding 0\e[39m" << frg::endlog;
+ *(int *)buffer = 0;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_KEEPALIVE) {
+ mlibc::infoLogger() << "\e[31mmlibc: getsockopt() call with SOL_SOCKET and SO_KEEPALIVE is unimplemented, hardcoding 0\e[39m" << frg::endlog;
+ *(int *)buffer = 0;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_LINGER) {
+ mlibc::infoLogger() << "\e[31mmlibc: getsockopt() call with SOL_SOCKET and SO_LINGER is unimplemented, hardcoding 0\e[39m" << frg::endlog;
+ *(int *)buffer = 0;
+ return 0;
+ }else{
+ mlibc::panicLogger() << "\e[31mmlibc: Unexpected getsockopt() call, layer: " << layer << " number: " << number << "\e[39m" << frg::endlog;
+ __builtin_unreachable();
+ }
+}
+
+int sys_setsockopt(int fd, int layer, int number,
+ const void *buffer, socklen_t size) {
+ SignalGuard sguard;
+
+ if(layer == SOL_SOCKET && number == SO_PASSCRED) {
+ int value;
+ __ensure(size == sizeof(int));
+ memcpy(&value, buffer, sizeof(int));
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_SET_OPTION);
+ req.set_command(SO_PASSCRED);
+ req.set_value(value);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()),
+ helix_ng::recvInline())
+ );
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_ATTACH_FILTER) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt(SO_ATTACH_FILTER) is not implemented"
+ " correctly\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_RCVBUFFORCE) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt(SO_RCVBUFFORCE) is not implemented"
+ " correctly\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_SNDBUF) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with SOL_SOCKET and SO_SNDBUF is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_KEEPALIVE) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with SOL_SOCKET and SO_KEEPALIVE is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_REUSEADDR) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with SOL_SOCKET and SO_REUSEADDR is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_REUSEPORT) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with SOL_SOCKET and SO_REUSEPORT is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_RCVBUF) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with SOL_SOCKET and SO_RCVBUF is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == IPPROTO_TCP && number == TCP_NODELAY) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with IPPROTO_TCP and TCP_NODELAY is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == SOL_SOCKET && number == SO_ACCEPTCONN) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with SOL_SOCKET and SO_ACCEPTCONN is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == IPPROTO_TCP && number == TCP_KEEPIDLE) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with IPPROTO_TCP and TCP_KEEPIDLE is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == SOL_NETLINK && number == NETLINK_EXT_ACK) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with SOL_NETLINK and NETLINK_EXT_ACK is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == SOL_NETLINK && number == NETLINK_GET_STRICT_CHK) {
+		mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with SOL_NETLINK and NETLINK_GET_STRICT_CHK is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == IPPROTO_TCP && number == TCP_KEEPINTVL) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with IPPROTO_TCP and TCP_KEEPINTVL is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else if(layer == IPPROTO_TCP && number == TCP_KEEPCNT) {
+ mlibc::infoLogger() << "\e[31mmlibc: setsockopt() call with IPPROTO_TCP and TCP_KEEPCNT is unimplemented\e[39m" << frg::endlog;
+ return 0;
+ }else{
+ mlibc::panicLogger() << "\e[31mmlibc: Unexpected setsockopt() call, layer: " << layer << " number: " << number << "\e[39m" << frg::endlog;
+ __builtin_unreachable();
+ }
+}
+
+int sys_listen(int fd, int) {
+ SignalGuard sguard;
+
+ auto handle = getHandleForFd(fd);
+ if (!handle)
+ return EBADF;
+
+ managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
+ req.set_req_type(managarm::fs::CntReqType::PT_LISTEN);
+
+ frg::string<MemoryAllocator> ser(getSysdepsAllocator());
+ req.SerializeToString(&ser);
+
+ auto [offer, send_req, recv_resp] = exchangeMsgsSync(
+ handle,
+ helix_ng::offer(
+ helix_ng::sendBuffer(ser.data(), ser.size()),
+ helix_ng::recvInline()
+ )
+ );
+
+ HEL_CHECK(offer.error());
+ HEL_CHECK(send_req.error());
+ HEL_CHECK(recv_resp.error());
+
+ managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator());
+ resp.ParseFromArray(recv_resp.data(), recv_resp.length());
+ __ensure(resp.error() == managarm::fs::Errors::SUCCESS);
+ return 0;
+}
+
+} //namespace mlibc
diff --git a/lib/mlibc/sysdeps/managarm/generic/time.cpp b/lib/mlibc/sysdeps/managarm/generic/time.cpp
new file mode 100644
index 0000000..468a738
--- /dev/null
+++ b/lib/mlibc/sysdeps/managarm/generic/time.cpp
@@ -0,0 +1,81 @@
+
+#include <bits/ensure.h>
+#include <time.h>
+#include <string.h>
+#include <sys/time.h>
+
+#include <hel.h>
+#include <hel-syscalls.h>
+#include <mlibc/debug.hpp>
+#include <mlibc/allocator.hpp>
+#include <mlibc/posix-pipe.hpp>
+#include <mlibc/all-sysdeps.hpp>
+
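+// Layout of the kernel-maintained clock tracker page used below: refClock is a
+// monotonic timestamp and baseRealtime the wall-clock time sampled at that
+// instant, both guarded by a seqlock, so
+//     realtime = baseRealtime + (helGetClock() - refClock)
+// which is exactly the computation performed in the CLOCK_REALTIME branch.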
+struct TrackerPage {
+ uint64_t seqlock;
+ int32_t state;
+ int32_t padding;
+ int64_t refClock;
+ int64_t baseRealtime;
+};
+
+extern thread_local TrackerPage *__mlibc_clk_tracker_page;
+
+namespace mlibc {
+
+int sys_clock_get(int clock, time_t *secs, long *nanos) {
+ // This implementation is inherently signal-safe.
+ if(clock == CLOCK_MONOTONIC || clock == CLOCK_MONOTONIC_RAW || clock == CLOCK_MONOTONIC_COARSE) {
+ uint64_t tick;
+ HEL_CHECK(helGetClock(&tick));
+ *secs = tick / 1000000000;
+ *nanos = tick % 1000000000;
+ }else if(clock == CLOCK_REALTIME) {
+ cacheFileTable();
+
+ // Start the seqlock read.
+ auto seqlock = __atomic_load_n(&__mlibc_clk_tracker_page->seqlock, __ATOMIC_ACQUIRE);
+ __ensure(!(seqlock & 1));
+
+ // Perform the actual loads.
+ auto ref = __atomic_load_n(&__mlibc_clk_tracker_page->refClock, __ATOMIC_RELAXED);
+ auto base = __atomic_load_n(&__mlibc_clk_tracker_page->baseRealtime, __ATOMIC_RELAXED);
+
+ // Finish the seqlock read.
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+ __ensure(__atomic_load_n(&__mlibc_clk_tracker_page->seqlock, __ATOMIC_RELAXED) == seqlock);
+
+ // Calculate the current time.
+ uint64_t tick;
+ HEL_CHECK(helGetClock(&tick));
+ __ensure(tick >= (uint64_t)__mlibc_clk_tracker_page->refClock); // TODO: Respect the seqlock!
+ tick -= ref;
+ tick += base;
+ *secs = tick / 1000000000;
+ *nanos = tick % 1000000000;
+ }else if(clock == CLOCK_PROCESS_CPUTIME_ID) {
+ mlibc::infoLogger() << "\e[31mmlibc: clock_gettime does not support the CPU time clocks"
+ "\e[39m" << frg::endlog;
+ *secs = 0;
+ *nanos = 0;
+ }else if(clock == CLOCK_BOOTTIME) {
+ mlibc::infoLogger() << "\e[31mmlibc: clock_gettime does not support CLOCK_BOOTTIME"
+ "\e[39m" << frg::endlog;
+ *secs = 0;
+ *nanos = 0;
+ }else{
+ mlibc::panicLogger() << "mlibc: Unexpected clock " << clock << frg::endlog;
+ }
+ return 0;
+}
+
+int sys_clock_getres(int clock, time_t *secs, long *nanos) {
+ (void)clock;
+ (void)secs;
+ (void)nanos;
+ mlibc::infoLogger() << "mlibc: clock_getres is a stub" << frg::endlog;
+ return 0;
+}
+
+} //namespace mlibc
+