Skip to content

Commit 1ab346f

Browse files
committed
TCG, fix: fix TCG mode
- Isolate the TCG-mode implementation from the other optimizations
- Redefine tb_find, tb_gen_code, and cpu_tb_exec for TCG mode

Signed-off-by: Zhaoyang Xie <xie_zyang@163.com>
1 parent 252fe6a commit 1ab346f

16 files changed

Lines changed: 665 additions & 35 deletions

File tree

accel/tcg/cpu-exec.c

Lines changed: 122 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -182,6 +182,7 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
182182
* TCG is not considered a security-sensitive part of QEMU so this does not
183183
* affect the impact of CFI in environment with high security requirements
184184
*/
185+
#ifdef CONFIG_LATX
185186
static inline TranslationBlock * QEMU_DISABLE_CFI
186187
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
187188
{
@@ -263,7 +264,6 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
263264
#ifdef CONFIG_LATX_DEBUG
264265
latx_before_exec_trace_tb(env, itb);
265266
#endif
266-
#ifdef CONFIG_LATX
267267

268268
env->fpu_clobber = false;
269269
ret = tcg_qemu_tb_exec(env, tb_ptr);
@@ -307,9 +307,6 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
307307
link_indirect_jmp(env);
308308
}
309309

310-
#else
311-
ret = tcg_qemu_tb_exec(env, tb_ptr);
312-
#endif
313310
#ifdef CONFIG_LATX_DEBUG
314311
latx_after_exec_trace_tb(env, itb);
315312
#endif
@@ -369,8 +366,81 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
369366

370367
return last_tb;
371368
}
369+
#else
370+
static inline TranslationBlock * QEMU_DISABLE_CFI
371+
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
372+
{
373+
if (!itb) {
374+
fprintf(stderr, "[LATX] ERROR!\n");
375+
}
376+
CPUArchState *env = cpu->env_ptr;
377+
uintptr_t ret;
378+
TranslationBlock *last_tb;
379+
const void *tb_ptr = itb->tc.ptr;
380+
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
381+
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
382+
"pid(%d) - tid(%" PRIuPTR ") Trace cpu%d: %p [ "
383+
TARGET_FMT_lx "/%#x] %s\n",
384+
getpid(), (uintptr_t)pthread_self(), cpu->cpu_index, itb->tc.ptr,
385+
itb->pc, itb->flags, lookup_symbol(itb->pc));
386+
}
387+
388+
#if defined(DEBUG_DISAS)
389+
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
390+
&& qemu_log_in_addr_range(itb->pc)) {
391+
FILE *logfile = qemu_log_lock();
392+
int flags = 0;
393+
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
394+
flags |= CPU_DUMP_FPU;
395+
}
396+
#if defined(TARGET_I386)
397+
flags |= CPU_DUMP_CCOP;
398+
#endif
399+
log_cpu_state(cpu, flags);
400+
qemu_log_unlock(logfile);
401+
}
402+
#endif /* DEBUG_DISAS */
403+
404+
qemu_thread_jit_execute();
405+
ret = tcg_qemu_tb_exec(env, tb_ptr);
406+
cpu->can_do_io = 1;
407+
/*
408+
* TODO: Delay swapping back to the read-write region of the TB
409+
* until we actually need to modify the TB. The read-only copy,
410+
* coming from the rx region, shares the same host TLB entry as
411+
* the code that executed the exit_tb opcode that arrived here.
412+
* If we insist on touching both the RX and the RW pages, we
413+
* double the host TLB pressure.
414+
*/
415+
last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
416+
*tb_exit = ret & TB_EXIT_MASK;
417+
418+
trace_exec_tb_exit(last_tb, *tb_exit);
419+
420+
if (*tb_exit > TB_EXIT_IDX1) {
421+
/* We didn't start executing this TB (eg because the instruction
422+
* counter hit zero); we must restore the guest PC to the address
423+
* of the start of the TB.
424+
*/
425+
CPUClass *cc = CPU_GET_CLASS(cpu);
426+
qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
427+
"Stopped execution of TB chain before %p ["
428+
TARGET_FMT_lx "] %s\n",
429+
last_tb->tc.ptr, last_tb->pc,
430+
lookup_symbol(last_tb->pc));
431+
if (cc->tcg_ops->synchronize_from_tb) {
432+
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
433+
} else {
434+
assert(cc->set_pc);
435+
cc->set_pc(cpu, last_tb->pc);
436+
}
437+
}
372438

373439

440+
return last_tb;
441+
}
442+
#endif
443+
374444
static void cpu_exec_enter(CPUState *cpu)
375445
{
376446
CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -552,7 +622,9 @@ void tb_stub_bypass(TranslationBlock *tb, int n, uintptr_t addr)
552622

553623
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
554624
{
625+
#ifdef CONFIG_LATX
555626
assert(!use_tu_jmp(tb));
627+
#endif
556628
#if defined(CONFIG_LATX) && defined(CONFIG_LATX_BNE_B)
557629
#define B_SHIFT 26
558630
#define OFF16_BITS 0xfc0003ff
@@ -681,10 +753,12 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
681753
(uintptr_t)tb_next);
682754
if (old) {
683755
goto out_unlock_next;
756+
#ifdef CONFIG_LATX
684757
} else if (tb_need_relink(tb, n)) {
685758
clear_signal_link_flag(tb, n);
686759
latx_tb_set_jmp_target(tb, n, tb_next);
687760
goto out_unlock_next;
761+
#endif
688762
}
689763

690764
#ifdef CONFIG_LATX
@@ -717,6 +791,7 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
717791
#include "tu.h"
718792
#endif
719793

794+
#ifdef CONFIG_LATX
720795
static inline TranslationBlock *tb_find(CPUState *cpu,
721796
TranslationBlock *last_tb,
722797
int tb_exit, uint32_t cflags)
@@ -785,6 +860,49 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
785860
}
786861
return tb;
787862
}
863+
#else
864+
static inline TranslationBlock *tb_find(CPUState *cpu,
865+
TranslationBlock *last_tb,
866+
int tb_exit, uint32_t cflags)
867+
{
868+
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
869+
TranslationBlock *tb;
870+
target_ulong cs_base, pc;
871+
uint32_t flags;
872+
873+
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
874+
875+
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
876+
if (tb == NULL) {
877+
mmap_lock();
878+
879+
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
880+
881+
if (!tb) {
882+
mmap_unlock();
883+
return NULL;
884+
}
885+
/* We add the TB in the virtual pc hash table for the fast lookup */
886+
int hash_value = tb_jmp_cache_hash_func(pc);
887+
qatomic_set(&cpu->tb_jmp_cache[hash_value], tb);
888+
mmap_unlock();
889+
}
890+
#ifndef CONFIG_USER_ONLY
891+
/* We don't take care of direct jumps when address mapping changes in
892+
* system emulation. So it's not safe to make a direct jump to a TB
893+
* spanning two pages because the mapping for the second page can change.
894+
*/
895+
if (tb->page_addr[1] != -1) {
896+
last_tb = NULL;
897+
}
898+
#endif
899+
/* See if we can patch the calling TB. */
900+
if (last_tb) {
901+
tb_add_jump(last_tb, tb_exit, tb);
902+
}
903+
return tb;
904+
}
905+
#endif
788906

789907
static inline bool cpu_handle_halt(CPUState *cpu)
790908
{
@@ -837,7 +955,6 @@ TranslationBlock * kzt_tb_find_exp(
837955
{
838956
return tb_find(cpu, last_tb, tb_exit, cflags);
839957
}
840-
841958
#endif
842959

843960
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)

0 commit comments

Comments
 (0)