From 8913c4fa0d2cec19db6e8b4d8598125977243c5d Mon Sep 17 00:00:00 2001
From: Tharo <17233964+Thar0@users.noreply.github.com>
Date: Tue, 29 Aug 2023 16:30:00 +0100
Subject: [PATCH] Fix and tidy up missing_gcc_functions.c (#1529)

---
 src/gcc_fix/missing_gcc_functions.c | 287 ++++++++++++++++------------
 1 file changed, 162 insertions(+), 125 deletions(-)

diff --git a/src/gcc_fix/missing_gcc_functions.c b/src/gcc_fix/missing_gcc_functions.c
index 4c572a764b..c62418c7bb 100644
--- a/src/gcc_fix/missing_gcc_functions.c
+++ b/src/gcc_fix/missing_gcc_functions.c
@@ -7,7 +7,9 @@
 
 #include "global.h"
 
-// Self-hosted memcmp.
+// Self-hosted libc memory functions, gcc assumes these exist even in a freestanding
+// environment and there is no way to tell it otherwise.
+
 int memcmp(void* s1, const void* s2, size_t n) {
     u8* m1 = (u8*)s1;
     u8* m2 = (u8*)s2;
@@ -35,177 +37,212 @@ void* memset(void* str, s32 c, size_t n) {
     return str;
 }
 
-// These functions convert c to an unsigned integer, rounding toward zero. Negative values
-// all become zero.
-u32 __fixunssfdi(f32 a) {
-    if (a < 0.0f) {
-        a = 0.0f;
+// Conversions involving 64-bit integer types required by the O32 MIPS ABI.
+
+// f32 -> u64, negative values become 0
+u64 __fixunssfdi(f32 a) {
+    if (a > 0.0f) {
+        register union {
+            f64 f;
+            u64 i;
+        } m;
+
+        __asm__ ("cvt.l.s %0, %1" : "=f"(m.f) : "f"(a));
+        return m.i;
     }
-
-    return (u32)a;
+    return 0;
 }
 
-u32 __fixunsdfdi(f64 a) {
-    if (a < 0.0) {
-        a = 0.0;
+// f64 -> u64, negative values become 0
+u64 __fixunsdfdi(f64 a) {
+    if (a > 0.0) {
+        register union {
+            f64 f;
+            u64 i;
+        } m;
+
+        __asm__ ("cvt.l.d %0, %1" : "=f"(m.f) : "f"(a));
+        return m.i;
     }
-
-    return (u32)a;
+    return 0;
 }
 
-// These functions convert c to a signed integer, rounding toward zero.
-s32 __fixsfdi(f32 c) {
-    return (s32)c;
+// f32 -> s64
+s64 __fixsfdi(f32 c) {
+    register union {
+        f64 f;
+        s64 i;
+    } m;
+
+    __asm__ ("cvt.l.s %0, %1" : "=f"(m.f) : "f"(c));
+    return m.i;
 }
 
-s32 __fixdfdi(f64 c) {
-    return (s32)c;
+// f64 -> s64
+s64 __fixdfdi(f64 c) {
+    register union {
+        f64 f;
+        s64 i;
+    } m;
+
+    __asm__ ("cvt.l.d %0, %1" : "=f"(m.f) : "f"(c));
+    return m.i;
 }
 
-// These functions convert c, a signed integer, to floating point.
-f32 __floatdisf(s32 c) {
-    return (f32)c;
+// s64 -> f32
+f32 __floatdisf(s64 c) {
+    register union {
+        f64 f;
+        s64 i;
+    } m;
+    register f32 v;
+
+    m.i = c;
+    __asm__ ("cvt.s.l %0, %1" : "=f"(v) : "f"(m.f));
+    return v;
 }
 
-f64 __floatdidf(s32 c) {
-    return (f64)c;
+// s64 -> f64
+f64 __floatdidf(s64 c) {
+    register union {
+        f64 f;
+        s64 i;
+    } m;
+    register f64 v;
+
+    m.i = c;
+    __asm__ ("cvt.d.l %0, %1" : "=f"(v) : "f"(m.f));
+    return v;
 }
 
-// These functions convert c, an unsigned integer, to floating point.
-f32 __floatundisf(u32 c) {
-    return (f32)c;
+// u64 -> f32
+f32 __floatundisf(u64 c) {
+    register union {
+        f64 f;
+        u64 i;
+    } m;
+    register f32 v;
+
+    m.i = c;
+    __asm__ ("cvt.s.l %0, %1" : "=f"(v) : "f"(m.f));
+    if ((s64)c < 0) {
+        // cvt.s.l assumes signed input, adjust output
+        v += 18446744073709551616.0f; // 2^64
+    }
+    return v;
 }
 
-f64 __floatundidf(u32 c) {
-    return (f64)c;
+// u64 -> f64
+f64 __floatundidf(u64 c) {
+    register union {
+        f64 f;
+        u64 i;
+    } m;
+    register f64 v;
+
+    m.i = c;
+    __asm__ ("cvt.d.l %0, %1" : "=f"(v) : "f"(m.f));
+    if ((s64)c < 0) {
+        // cvt.d.l assumes signed input, adjust output
+        v += 18446744073709551616.0; // 2^64
+    }
+    return v;
 }
 
-f32 __powisf2(f32 a, s32 b) {
-    const s32 recip = b < 0;
-    f32 r = 1;
+// Compute x^m by binary exponentiation
 
-    while (1) {
-        if (b & 1) {
-            r *= a;
+f32 __powisf2(f32 x, s32 m) {
+    u32 n = (m < 0) ? -m : m;
+    f32 y = (n % 2 != 0) ? x : 1.0f;
+
+    while (n >>= 1) {
+        x = x * x;
+
+        if (n % 2 != 0) {
+            y = y * x;
         }
-
-        b /= 2;
-
-        if (b == 0) {
-            break;
-        }
-
-        a *= a;
     }
 
-    return recip ? 1 / r : r;
+    return (m < 0) ? (1.0f / y) : y;
 }
 
+// Compute division and modulo of 64-bit signed and unsigned integers
+
 __asm__("                                \n\
     .set push                            \n\
-    .set noat \n\
     .set noreorder                       \n\
     .set gp=64                           \n\
                                          \n\
     .global __umoddi3                    \n\
 __umoddi3:                               \n\
-    .type __umoddi3, @function \n\
-    .ent __umoddi3 \n\
-    sw $a0, ($sp) \n\
-    sw $a1, 4($sp) \n\
-    sw $a2, 8($sp) \n\
-    sw $a3, 0xc($sp) \n\
-    ld $t7, 8($sp) \n\
-    ld $t6, ($sp) \n\
-    ddivu $zero, $t6, $t7 \n\
-    bnez $t7, 1f \n\
-    nop \n\
-    break 7 \n\
-1: \n\
-    mfhi $v0 \n\
-    dsll32 $v1, $v0, 0 \n\
-    dsra32 $v1, $v1, 0 \n\
-    jr $ra \n\
+    .type __umoddi3, @function           \n\
+    .ent __umoddi3                       \n\
+    sw $a0, 0x0($sp)                     \n\
+    sw $a1, 0x4($sp)                     \n\
+    sw $a2, 0x8($sp)                     \n\
+    sw $a3, 0xC($sp)                     \n\
+    ld $t6, 0($sp)                       \n\
+    ld $t7, 8($sp)                       \n\
+    dremu $v0, $t6, $t7                  \n\
+    dsll32 $v1, $v0, 0                   \n\
+    dsra32 $v1, $v1, 0                   \n\
+    jr $ra                               \n\
     dsra32 $v0, $v0, 0                   \n\
-    .end __umoddi3 \n\
-    .size __umoddi3, . - __umoddi3 \n\
+    .end __umoddi3                       \n\
+    .size __umoddi3, . - __umoddi3       \n\
                                          \n\
     .global __udivdi3                    \n\
 __udivdi3:                               \n\
-    .type __udivdi3, @function \n\
-    .ent __udivdi3 \n\
-    sw $a0, ($sp) \n\
-    sw $a1, 4($sp) \n\
-    sw $a2, 8($sp) \n\
-    sw $a3, 0xc($sp) \n\
-    ld $t7, 8($sp) \n\
-    ld $t6, ($sp) \n\
-    ddivu $zero, $t6, $t7 \n\
-    bnez $t7, 1f \n\
-    nop \n\
-    break 7 \n\
-1: \n\
-    mflo $v0 \n\
-    dsll32 $v1, $v0, 0 \n\
-    dsra32 $v1, $v1, 0 \n\
-    jr $ra \n\
+    .type __udivdi3, @function           \n\
+    .ent __udivdi3                       \n\
+    sw $a0, 0x0($sp)                     \n\
+    sw $a1, 0x4($sp)                     \n\
+    sw $a2, 0x8($sp)                     \n\
+    sw $a3, 0xC($sp)                     \n\
+    ld $t6, 0($sp)                       \n\
+    ld $t7, 8($sp)                       \n\
+    ddivu $v0, $t6, $t7                  \n\
+    dsll32 $v1, $v0, 0                   \n\
+    dsra32 $v1, $v1, 0                   \n\
+    jr $ra                               \n\
     dsra32 $v0, $v0, 0                   \n\
-    .end __udivdi3 \n\
-    .size __udivdi3, . - __udivdi3 \n\
+    .end __udivdi3                       \n\
+    .size __udivdi3, . - __udivdi3       \n\
                                          \n\
     .global __moddi3                     \n\
 __moddi3:                                \n\
-    .type __moddi3, @function \n\
-    .ent __moddi3 \n\
-    sw $a0, ($sp) \n\
-    sw $a1, 4($sp) \n\
-    sw $a2, 8($sp) \n\
-    sw $a3, 0xc($sp) \n\
-    ld $t7, 8($sp) \n\
-    ld $t6, ($sp) \n\
-    ddivu $zero, $t6, $t7 \n\
-    bnez $t7, 1f \n\
-    nop \n\
-    break 7 \n\
-1: \n\
-    mfhi $v0 \n\
-    dsll32 $v1, $v0, 0 \n\
-    dsra32 $v1, $v1, 0 \n\
-    jr $ra \n\
+    .type __moddi3, @function            \n\
+    .ent __moddi3                        \n\
+    sw $a0, 0x0($sp)                     \n\
+    sw $a1, 0x4($sp)                     \n\
+    sw $a2, 0x8($sp)                     \n\
+    sw $a3, 0xC($sp)                     \n\
+    ld $t6, 0($sp)                       \n\
+    ld $t7, 8($sp)                       \n\
+    drem $v0, $t6, $t7                   \n\
+    dsll32 $v1, $v0, 0                   \n\
+    dsra32 $v1, $v1, 0                   \n\
+    jr $ra                               \n\
     dsra32 $v0, $v0, 0                   \n\
-    .end __moddi3 \n\
-    .size __moddi3, . - __moddi3 \n\
+    .end __moddi3                        \n\
+    .size __moddi3, . - __moddi3         \n\
                                          \n\
     .global __divdi3                     \n\
 __divdi3:                                \n\
-    .type __divdi3, @function \n\
-    .ent __divdi3 \n\
-    sw $a0, ($sp) \n\
-    sw $a1, 4($sp) \n\
-    sw $a2, 8($sp) \n\
-    sw $a3, 0xc($sp) \n\
+    .type __divdi3, @function            \n\
+    .ent __divdi3                        \n\
+    sw $a0, 0x0($sp)                     \n\
+    sw $a1, 0x4($sp)                     \n\
+    sw $a2, 0x8($sp)                     \n\
+    sw $a3, 0xC($sp)                     \n\
+    ld $t6, 0($sp)                       \n\
     ld $t7, 8($sp)                       \n\
-    ld $t6, ($sp) \n\
-    ddiv $zero, $t6, $t7 \n\
-    nop \n\
-    bnez $t7, 1f \n\
-    nop \n\
-    break 7 \n\
-1: \n\
-    daddiu $at, $zero, -1 \n\
-    bne $t7, $at, 2f \n\
-    daddiu $at, $zero, 1 \n\
-    dsll32 $at, $at, 0x1f \n\
-    bne $t6, $at, 2f \n\
-    nop \n\
-    break 6 \n\
-2: \n\
-    mflo $v0 \n\
+    ddiv $v0, $t6, $t7                   \n\
     dsll32 $v1, $v0, 0                   \n\
     dsra32 $v1, $v1, 0                   \n\
     jr $ra                               \n\
     dsra32 $v0, $v0, 0                   \n\
-    .end __divdi3 \n\
-    .size __divdi3, . - __divdi3 \n\
+    .end __divdi3                        \n\
+    .size __divdi3, . - __divdi3         \n\
                                          \n\
     .set pop                             \n\
 \n");
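
Illustration (not part of the patch): the routines above are the libgcc support functions that GCC expects to find when it lowers 64-bit integer division/modulo and conversions between floating point and 64-bit integers. The sketch below shows the kind of C code that exercises them when built with GCC for this target; the function name example_64bit_ops is made up for the illustration, and u64/s64/f32 are the project's usual typedefs.

u64 example_64bit_ops(u64 a, u64 b, f32 f) {
    u64 q = a / b;                 // may be lowered to a call to __udivdi3
    u64 r = a % b;                 // may be lowered to a call to __umoddi3
    s64 d = (s64)a / (s64)b;       // may be lowered to a call to __divdi3
    s64 m = (s64)a % (s64)b;       // may be lowered to a call to __moddi3
    u64 u = (u64)f;                // may be lowered to a call to __fixunssfdi
    f32 g = (f32)a;                // may be lowered to a call to __floatundisf
    f32 p = __builtin_powif(f, 3); // may be lowered to a call to __powisf2
    return q + r + (u64)d + (u64)m + u + (u64)g + (u64)p;
}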