[PATCH] tcg: Fix execution on Apple Silicon


From: Roman Bolshakov
Subject: [PATCH] tcg: Fix execution on Apple Silicon
Date: Sat, 2 Jan 2021 15:21:02 +0300

Pages can't be both writable and executable at the same time on Apple
Silicon. macOS provides a public API to toggle write protection [1] for
JIT applications such as TCG.

1. 
https://developer.apple.com/documentation/apple_silicon/porting_just-in-time_compilers_to_apple_silicon

Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com>
---

Happy holidays, everyone.

This is somewhat similar to 
https://patchwork.kernel.org/project/qemu-devel/patch/20201108232425.1705-7-j@getutm.app/
but I couldn't apply the series, so I started from scratch.

The primary difference from the patch above is that the public API is used;
a minimal sketch of that API follows the list below.
Other differences:
  * TB pages are mostly kept write-locked except around tcg_qemu_tb_exec()
  * x86_64 macOS doesn't use MAP_JIT and W^X switches
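
For reference, here is a minimal, self-contained sketch (not part of the
patch, and not QEMU code) of the pattern Apple documents in [1]: map the
buffer with MAP_JIT, drop write protection on the calling thread while
emitting code, then re-enable it and flush the instruction cache before
executing. Buffer size and the emitted instructions are illustrative only;
the patch additionally guards the toggle with
pthread_jit_write_protect_supported_np().

/* cc -o jit-demo jit-demo.c  (Apple Silicon, macOS 11+) */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <sys/mman.h>
#include <libkern/OSCacheControl.h>

int main(void)
{
    /* AArch64 encoding of "mov w0, #42; ret" */
    static const uint32_t code[] = { 0x52800540, 0xd65f03c0 };
    size_t size = 0x4000;

    /* On Apple Silicon an RWX mapping must be created with MAP_JIT. */
    void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    pthread_jit_write_protect_np(0);        /* thread may write, not execute */
    memcpy(buf, code, sizeof(code));
    pthread_jit_write_protect_np(1);        /* thread may execute, not write */
    sys_icache_invalidate(buf, sizeof(code));

    int (*fn)(void) = (int (*)(void))buf;
    printf("%d\n", fn());                   /* prints 42 */

    munmap(buf, size);
    return 0;
}

(Hardened-runtime builds additionally need the com.apple.security.cs.allow-jit
entitlement to use MAP_JIT.)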

Regards,
Roman

 accel/tcg/cpu-exec.c      | 10 ++++++++++
 accel/tcg/translate-all.c | 26 ++++++++++++++++++++++++++
 include/exec/exec-all.h   |  2 ++
 tcg/tcg.c                 |  1 +
 4 files changed, 39 insertions(+)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 8689c54499..0042fc9f2b 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -175,7 +175,9 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
     }
 #endif /* DEBUG_DISAS */
 
+    tb_write_lock();
     ret = tcg_qemu_tb_exec(env, tb_ptr);
+    tb_write_unlock();
     cpu->can_do_io = 1;
     last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
     tb_exit = ret & TB_EXIT_MASK;
@@ -220,9 +222,11 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
     cflags |= MIN(max_cycles, CF_COUNT_MASK);
 
     mmap_lock();
+    tb_write_unlock();
     tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                      orig_tb->flags, cflags);
     tb->orig_tb = orig_tb;
+    tb_write_lock();
     mmap_unlock();
 
     /* execute the generated code */
@@ -268,7 +272,9 @@ void cpu_exec_step_atomic(CPUState *cpu)
         tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
         if (tb == NULL) {
             mmap_lock();
+            tb_write_unlock();
             tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+            tb_write_lock();
             mmap_unlock();
         }
 
@@ -428,7 +434,9 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
     tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
     if (tb == NULL) {
         mmap_lock();
+        tb_write_unlock();
         tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
+        tb_write_lock();
         mmap_unlock();
         /* We add the TB in the virtual pc hash table for the fast lookup */
         qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
@@ -444,7 +452,9 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
 #endif
     /* See if we can patch the calling TB. */
     if (last_tb) {
+        tb_write_unlock();
         tb_add_jump(last_tb, tb_exit, tb);
+        tb_write_lock();
     }
     return tb;
 }
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index b7d50a73d4..1562076ffb 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1072,6 +1072,9 @@ static inline void *alloc_code_gen_buffer(void)
     size_t size = tcg_ctx->code_gen_buffer_size;
     void *buf;
 
+#if defined(__APPLE__) && defined(__aarch64__)
+    flags |= MAP_JIT;
+#endif
     buf = mmap(NULL, size, prot, flags, -1, 0);
     if (buf == MAP_FAILED) {
         return NULL;
@@ -1485,7 +1488,9 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
 
 static void tb_phys_invalidate__locked(TranslationBlock *tb)
 {
+    tb_write_unlock();
     do_tb_phys_invalidate(tb, true);
+    tb_write_lock();
 }
 
 /* invalidate one TB
@@ -2722,3 +2727,24 @@ void tcg_flush_softmmu_tlb(CPUState *cs)
     tlb_flush(cs);
 #endif
 }
+
+#if defined(__APPLE__) && defined(__aarch64__)
+static void tb_write_protect(bool locked)
+{
+    if (pthread_jit_write_protect_supported_np()) {
+        pthread_jit_write_protect_np(locked);
+    }
+}
+#else
+static void tb_write_protect(bool locked) {}
+#endif
+
+void tb_write_lock(void)
+{
+    tb_write_protect(true);
+}
+
+void tb_write_unlock(void)
+{
+    tb_write_protect(false);
+}
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index fab573da06..962dca0975 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -549,6 +549,8 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                    target_ulong cs_base, uint32_t flags,
                                    uint32_t cf_mask);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
+void tb_write_lock(void);
+void tb_write_unlock(void);
 
 /* GETPC is the true target of the return instruction that we'll execute.  */
 #if defined(CONFIG_TCG_INTERPRETER)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 43c6cf8f52..303bb436bd 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1065,6 +1065,7 @@ void tcg_prologue_init(TCGContext *s)
     s->pool_labels = NULL;
 #endif
 
+    tb_write_unlock();
     /* Generate the prologue.  */
     tcg_target_qemu_prologue(s);
 
-- 
2.29.2



