CI(full),CI(release): More PPC64 atomic fixes

mio
2025-02-11 10:18:01 +08:00
parent 0a02746572
commit 381850356f
2 changed files with 16 additions and 12 deletions
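
Background on what the change does: both files previously gated the 128-bit paths with a run-time test such as "if (HAVE_ATOMIC128) { ... } else { ... }". With that form, everything inside both branches still has to compile, so the *_parallel helpers (and the gen_helper_* wrappers that call them) must exist even on hosts without 128-bit atomic support. This commit moves the gating to the preprocessor: the helpers are now wrapped in #ifdef HAVE_ATOMIC128 / #ifdef HAVE_CMPXCHG128, and the translator picks between the parallel helper call and the exit-to-exclusive-lock fallback at compile time. A minimal, self-contained sketch of the difference; HAVE_WIDGET and do_widget() are made-up names used only for illustration, not part of this commit:

#include <stdio.h>

/* Hypothetical feature macro: defined only when the fast path is available
 * (it stands in for HAVE_ATOMIC128 / HAVE_CMPXCHG128 in this sketch). */
/* #define HAVE_WIDGET 1 */

#ifdef HAVE_WIDGET
static void do_widget(void) { puts("fast path"); }
#endif

static void run(void)
{
#ifdef HAVE_WIDGET
    /* Compiled only when do_widget() actually exists. */
    do_widget();
#else
    /* A run-time check like "if (HAVE_WIDGET)" would still need do_widget()
     * to compile and link here; the #ifdef drops the call entirely. */
    puts("fallback path");
#endif
}

int main(void)
{
    run();
    return 0;
}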

View File

@@ -374,6 +374,7 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
 }
 
 #ifdef TARGET_PPC64
+#ifdef HAVE_ATOMIC128
 uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                                uint32_t opidx)
 {
@@ -419,7 +420,9 @@ void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
     val = int128_make128(lo, hi);
     helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
 }
+#endif
 
+#ifdef HAVE_CMPXCHG128
 uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                   uint64_t new_lo, uint64_t new_hi,
                                   uint32_t opidx)
@@ -464,6 +467,7 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
     return env->so + success * CRF_EQ_BIT;
 }
 #endif
+#endif
 
 /*****************************************************************************/
 /* Altivec extension helpers */
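
Taken together, the three hunks above leave this file's PPC64 quad-word helpers guarded roughly as follows (reconstructed from the hunks, bodies elided): the lq/stq parallel helpers now build only when HAVE_ATOMIC128 is defined, and the stqcx. parallel helpers only when HAVE_CMPXCHG128 is defined, both still inside the existing TARGET_PPC64 block.

#ifdef TARGET_PPC64
#ifdef HAVE_ATOMIC128
/* helper_lq_le_parallel, helper_stq_be_parallel and their
 * byte-order counterparts ... */
#endif

#ifdef HAVE_CMPXCHG128
/* helper_stqcx_le_parallel, helper_stqcx_be_parallel ... */
#endif
#endif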

View File

@@ -2780,7 +2780,7 @@ static void gen_lq(DisasContext *ctx)
     hi = cpu_gpr[rd];
 
     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
-        if (HAVE_ATOMIC128) {
+#ifdef HAVE_ATOMIC128
             TCGv_i32 oi = tcg_temp_new_i32(tcg_ctx);
             if (ctx->le_mode) {
                 tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
@@ -2791,11 +2791,11 @@ static void gen_lq(DisasContext *ctx)
             }
             tcg_temp_free_i32(tcg_ctx, oi);
             tcg_gen_ld_i64(tcg_ctx, hi, tcg_ctx->cpu_env, offsetof(CPUPPCState, retxh));
-        } else {
+#else
             /* Restart with exclusive lock. */
             gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
             ctx->base.is_jmp = DISAS_NORETURN;
-        }
+#endif
     } else if (ctx->le_mode) {
         tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_LEQ);
         gen_addr_add(ctx, EA, EA, 8);
@@ -2958,7 +2958,7 @@ static void gen_std(DisasContext *ctx)
         hi = cpu_gpr[rs];
 
         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
-            if (HAVE_ATOMIC128) {
+#ifdef HAVE_ATOMIC128
                 TCGv_i32 oi = tcg_temp_new_i32(tcg_ctx);
                 if (ctx->le_mode) {
                     tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
@@ -2968,11 +2968,11 @@ static void gen_std(DisasContext *ctx)
                     gen_helper_stq_be_parallel(tcg_ctx, tcg_ctx->cpu_env, EA, lo, hi, oi);
                 }
                 tcg_temp_free_i32(tcg_ctx, oi);
-            } else {
+#else
                 /* Restart with exclusive lock. */
                 gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
                 ctx->base.is_jmp = DISAS_NORETURN;
-            }
+#endif
         } else if (ctx->le_mode) {
             tcg_gen_qemu_st_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_LEQ);
             gen_addr_add(ctx, EA, EA, 8);
@@ -3574,7 +3574,7 @@ static void gen_lqarx(DisasContext *ctx)
     hi = cpu_gpr[rd];
 
     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
-        if (HAVE_ATOMIC128) {
+#ifdef HAVE_ATOMIC128
             TCGv_i32 oi = tcg_temp_new_i32(tcg_ctx);
             if (ctx->le_mode) {
                 tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
@@ -3587,13 +3587,13 @@ static void gen_lqarx(DisasContext *ctx)
             }
             tcg_temp_free_i32(tcg_ctx, oi);
             tcg_gen_ld_i64(tcg_ctx, hi, tcg_ctx->cpu_env, offsetof(CPUPPCState, retxh));
-        } else {
+#else
             /* Restart with exclusive lock. */
             gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
             ctx->base.is_jmp = DISAS_NORETURN;
             tcg_temp_free(tcg_ctx, EA);
             return;
-        }
+#endif
     } else if (ctx->le_mode) {
         tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
         tcg_gen_mov_tl(tcg_ctx, cpu_reserve, EA);
@@ -3632,7 +3632,7 @@ static void gen_stqcx_(DisasContext *ctx)
     hi = cpu_gpr[rs];
 
     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
-        if (HAVE_CMPXCHG128) {
+#ifdef HAVE_CMPXCHG128
             TCGv_i32 oi = tcg_const_i32(tcg_ctx, DEF_MEMOP(MO_Q) | MO_ALIGN_16);
             if (ctx->le_mode) {
                 gen_helper_stqcx_le_parallel(tcg_ctx, cpu_crf[0], tcg_ctx->cpu_env,
@@ -3642,11 +3642,11 @@ static void gen_stqcx_(DisasContext *ctx)
                                              EA, lo, hi, oi);
             }
             tcg_temp_free_i32(tcg_ctx, oi);
-        } else {
+#else
             /* Restart with exclusive lock. */
             gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
             ctx->base.is_jmp = DISAS_NORETURN;
-        }
+#endif
         tcg_temp_free(tcg_ctx, EA);
     } else {
         TCGLabel *lab_fail = gen_new_label(tcg_ctx);
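
In this second file the same compile-time gating is applied in each of the four emitters touched by the hunks above (gen_lq, gen_std, gen_lqarx and gen_stqcx_). Paraphrasing those hunks, the parallel path in each now has the following shape; when the host lacks 128-bit atomics the #else branch ends the translation block and requests a restart under the exclusive lock, so the 16-byte access is still handled correctly, just without the fast helper:

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
#ifdef HAVE_ATOMIC128               /* HAVE_CMPXCHG128 for stqcx. */
        /* ... emit a call to the matching *_parallel helper ... */
#else
        /* Restart with exclusive lock. */
        gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
#endif
    } else {
        /* non-parallel case: ordinary 64-bit loads/stores */
    }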