1
0
Fork 0
mirror of https://github.com/zeldaret/oot.git synced 2025-01-14 12:17:08 +00:00

[iQue] Match handwritten asm files in libultra/os, match C replacements of osGetCount and __osSetCompare (#2413)

* [iQue] Match handwritten asm files in libultra/os, match C replacements of osGetCount and __osSetCompare

* Format

* Adjust some label names in dcache functions
This commit is contained in:
Tharo 2025-01-10 11:16:19 +00:00 committed by GitHub
parent eed11e3fb5
commit 94971a61b4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 91 additions and 41 deletions

View file

@ -648,6 +648,9 @@ $(BUILD_DIR)/src/libultra/%.o: CFLAGS := $(EGCS_CFLAGS) -mno-abicalls
$(BUILD_DIR)/src/libultra/%.o: CCASFLAGS := $(EGCS_CCASFLAGS)
$(BUILD_DIR)/src/libultra/%.o: ASOPTFLAGS := $(EGCS_ASOPTFLAGS)
$(BUILD_DIR)/src/libultra/reg/_%.o: OPTFLAGS := -O0
$(BUILD_DIR)/src/libultra/reg/_%.o: MIPS_VERSION := -mgp64 -mfp64 -mips3
$(BUILD_DIR)/src/libultra/libc/ll.o: OPTFLAGS := -O0
$(BUILD_DIR)/src/libultra/libc/llcvt.o: OPTFLAGS := -O0

View file

@ -78,6 +78,9 @@
#define MTC0(dst, src) \
.set noreorder; mtc0 dst, src; .set reorder
#define CACHE(op, base) \
.set noreorder; cache op, base; .set reorder
#define CFC1(dst, src) \
.set noreorder; cfc1 dst, src; .set reorder
#define CTC1(src, dst) \

View file

@ -20,56 +20,50 @@
* the entire data cache is invalidated.
*/
LEAF(osInvalDCache)
.set noreorder
/* If the amount to invalidate is less than or equal to 0, return immediately */
blez a1, 3f
nop
/* If the amount to invalidate is as large as or larger than
* the data cache size, invalidate all */
li t3, DCACHE_SIZE
bgeu a1, t3, 4f
nop
/* Ensure end address doesn't wrap around and end up smaller
* than the start address */
move t0, a0
addu t1, a0, a1
bgeu t0, t1, 3f
nop
/* Mask start with cache line */
addiu t1, t1, -DCACHE_LINESIZE
andi t2, t0, DCACHE_LINEMASK
/* If mask is not zero, the start is not cache aligned */
beqz t2, 1f
addiu t1, t1, -DCACHE_LINESIZE
/* Subtract mask result to align to cache line */
subu t0, t0, t2
/* Hit-Writeback-Invalidate unaligned part */
cache (CACH_PD | C_HWBINV), (t0)
CACHE( (CACH_PD | C_HWBINV), (t0))
/* If that's all there is to do, return early */
bgeu t0, t1, 3f
nop
addiu t0, t0, DCACHE_LINESIZE
1:
/* Mask end with cache line */
andi t2, t1, DCACHE_LINEMASK
/* If mask is not zero, the end is not cache aligned */
beqz t2, 1f
nop
beqz t2, 2f
/* Subtract mask result to align to cache line */
subu t1, t1, t2
/* Hit-Writeback-Invalidate unaligned part */
cache (CACH_PD | C_HWBINV), DCACHE_LINESIZE(t1)
CACHE( (CACH_PD | C_HWBINV), DCACHE_LINESIZE(t1))
/* If that's all there is to do, return early */
bltu t1, t0, 3f
nop
/* Invalidate the rest */
1:
2:
/* Hit-Invalidate */
cache (CACH_PD | C_HINV), (t0)
bltu t0, t1, 1b
CACHE( (CACH_PD | C_HINV), (t0))
.set noreorder
bltu t0, t1, 2b
addiu t0, t0, DCACHE_LINESIZE
.set reorder
3:
jr ra
nop
4:
li t0, K0BASE
@ -77,10 +71,10 @@ LEAF(osInvalDCache)
addiu t1, t1, -DCACHE_LINESIZE
5:
/* Index-Writeback-Invalidate */
cache (CACH_PD | C_IWBINV), (t0)
CACHE( (CACH_PD | C_IWBINV), (t0))
.set noreorder
bltu t0, t1, 5b
addiu t0, DCACHE_LINESIZE
jr ra
nop
.set reorder
jr ra
END(osInvalDCache)

View file

@ -5,42 +5,40 @@
.text
LEAF(osInvalICache)
.set noreorder
/* If the amount to invalidate is less than or equal to 0, return immediately */
blez a1, 2f
nop
/* If the amount to invalidate is as large as or larger than */
/* the instruction cache size, invalidate all */
li t3, ICACHE_SIZE
bgeu a1, t3, 3f
nop
/* ensure end address doesn't wrap around and end up smaller */
/* than the start address */
move t0, a0
addu t1, a0, a1
bgeu t0, t1, 2f
nop
/* Mask and subtract to align to cache line */
andi t2, t0, ICACHE_LINEMASK
addiu t1, t1, -ICACHE_LINESIZE
andi t2, t0, ICACHE_LINEMASK
subu t0, t0, t2
1:
cache (CACH_PI | C_HINV), (t0)
CACHE( (CACH_PI | C_HINV), (t0))
.set noreorder
bltu t0, t1, 1b
addiu t0, t0, ICACHE_LINESIZE
.set reorder
2:
jr ra
nop
3:
li t0, K0BASE
addu t1, t0, t3
addiu t1, t1, -ICACHE_LINESIZE
4:
cache (CACH_PI | C_IINV), (t0)
CACHE( (CACH_PI | C_IINV), (t0))
.set noreorder
bltu t0, t1, 4b
addiu t0, ICACHE_LINESIZE
.set reorder
jr ra
nop
.set reorder
END(osInvalICache)

View file

@ -116,7 +116,7 @@ LEAF(osSetIntMask)
la t0, __OSGlobalIntMask
lw t3, (t0)
/* Bitwise-OR in the disabled CPU bits of __OSGlobalIntMask */
xor t0, t3, ~0
xor t0, t3, 0xFFFFFFFF
and t0, t0, SR_IMASK
or v0, v0, t0
/* Fetch MI_INTR_MASK_REG */
@ -125,7 +125,7 @@ LEAF(osSetIntMask)
beqz t2, 1f
srl t1, t3, RCP_IMASKSHIFT
/* Bitwise-OR in the disabled RCP bits of __OSGlobalIntMask */
xor t1, t1, ~0
xor t1, t1, 0xFFFFFFFF
and t1, t1, (RCP_IMASK >> RCP_IMASKSHIFT)
or t2, t2, t1
1:

View file

@ -13,41 +13,41 @@
*/
LEAF(osWritebackDCache)
/* If the amount to write back is less than or equal to 0, return immediately */
blez a1, .ret
blez a1, 2f
/* If the amount to write back is as large as or larger than */
/* the data cache size, write back all */
li t3, DCACHE_SIZE
bgeu a1, t3, .all
bgeu a1, t3, 3f
/* ensure end address doesn't wrap around and end up smaller */
/* than the start address */
move t0, a0
addu t1, a0, a1
bgeu t0, t1, .ret
bgeu t0, t1, 2f
/* Mask and subtract to align to cache line */
andi t2, t0, DCACHE_LINEMASK
addiu t1, t1, -DCACHE_LINESIZE
andi t2, t0, DCACHE_LINEMASK
subu t0, t0, t2
1:
CACHE( (CACH_PD | C_HWB), (t0))
.set noreorder
cache (CACH_PD | C_HWB), (t0)
bltu t0, t1, 1b
addiu t0, t0, DCACHE_LINESIZE
.set reorder
.ret:
2:
jr ra
/* same as osWritebackDCacheAll in operation */
.all:
3:
li t0, K0BASE
addu t1, t0, t3
addiu t1, t1, -DCACHE_LINESIZE
1:
4:
CACHE( (CACH_PD | C_IWBINV), (t0))
.set noreorder
cache (CACH_PD | C_IWBINV), (t0)
bltu t0, t1, 1b
bltu t0, t1, 4b
addiu t0, DCACHE_LINESIZE
.set reorder
jr ra

View file

@ -0,0 +1,32 @@
#ifdef BBPLAYER
#include "ultra64.h"
/* Bookkeeping for extending the 32-bit CP0 Count register to 64 bits on
 * iQue (BBPLAYER): the "R" pair tracks the raw hardware counter, the "V"
 * pair tracks the rescaled ("virtual") counter returned to callers. A
 * wrap counter is bumped whenever the respective value goes backwards. */
u32 __osBbLastRCount;
u32 __osBbRCountWraps;
u32 __osBbLastVCount;
u32 __osBbVCountWraps;
/* C replacement for the handwritten osGetCount: reads CP0 Count and
 * rescales it by 125/192 — presumably converting the iQue counter rate
 * to the N64 counter rate so timing code sees N64-like values
 * (NOTE(review): rate interpretation inferred from commit context). */
u32 osGetCount(void) {
u32 count;
/* Mask interrupts so the register read and the wrap bookkeeping below
 * are atomic with respect to other users of these globals. */
u32 mask = __osDisableInt();
/* Read the CP0 Count register directly. */
__asm__("mfc0 %0, $%1" : "=r"(count) : "i"(C0_COUNT));
/* Raw counter went backwards => it wrapped past 2^32. */
if (count < __osBbLastRCount) {
__osBbRCountWraps++;
}
__osBbLastRCount = count;
/* Build the 64-bit extended count, rescale by 125/192, and truncate
 * back to 32 bits; then track wraps of the rescaled value too, so
 * __osSetCompare can undo the conversion consistently. */
count = (((u64)__osBbRCountWraps << 32) | count) * 125ull / 192ull;
if (count < __osBbLastVCount) {
__osBbVCountWraps++;
}
__osBbLastVCount = count;
__osRestoreInt(mask);
return count;
}
#endif

View file

@ -0,0 +1,20 @@
#ifdef BBPLAYER
#include "ultra64.h"
/* Wrap/last-value state shared with osGetCount (defined there). */
extern u32 __osBbLastRCount;
extern u32 __osBbRCountWraps;
extern u32 __osBbLastVCount;
extern u32 __osBbVCountWraps;
/* C replacement for the handwritten __osSetCompare: converts a compare
 * value expressed in the rescaled ("virtual") count domain back to the
 * raw hardware counter domain (inverse 192/125 of osGetCount's 125/192
 * scaling) before writing CP0 Compare. v == 0 is written through
 * unconverted — presumably the "disable timer interrupt" sentinel
 * (NOTE(review): sentinel meaning inferred, confirm against callers). */
void __osSetCompare(u32 v) {
if (v != 0) {
/* Mask interrupts while reading the wrap-tracking globals that
 * osGetCount updates. */
u32 mask = __osDisableInt();
/* If the target is below the last virtual count, it lies after the
 * next virtual wrap, so count one extra wrap when extending to 64 bits. */
u32 wraps = (v < __osBbLastVCount) ? (__osBbVCountWraps + 1) : __osBbVCountWraps;
/* Rescale back to the raw counter rate and truncate to 32 bits. */
v = (((u64)wraps << 32) | v) * 192ull / 125ull;
__osRestoreInt(mask);
}
/* Write the CP0 Compare register directly. */
__asm__("mtc0 %0, $%1" ::"r"(v), "i"(C0_COMPARE));
}
#endif

View file

@ -59,8 +59,8 @@ B50,80000FA0,src/boot/zlib
6F10,80007360,src/libultra/os/seteventmesg
6FE0,80007430,src/libultra/os/getcause
6FF0,80007440,src/libultra/os/setwatchlo
7000,80007450,src/libultra/os/getcount
7100,80007550,src/libultra/os/setcompare
7000,80007450,src/libultra/reg/_getcount
7100,80007550,src/libultra/reg/_setcompare
71B0,80007600,src/libultra/io/sirawread
7200,80007650,src/libultra/io/sirawwrite
7250,800076A0,src/libultra/os/destroythread

1 offset vram .text
59 6F10 80007360 src/libultra/os/seteventmesg
60 6FE0 80007430 src/libultra/os/getcause
61 6FF0 80007440 src/libultra/os/setwatchlo
62 7000 80007450 src/libultra/os/getcount src/libultra/reg/_getcount
63 7100 80007550 src/libultra/os/setcompare src/libultra/reg/_setcompare
64 71B0 80007600 src/libultra/io/sirawread
65 7200 80007650 src/libultra/io/sirawwrite
66 7250 800076A0 src/libultra/os/destroythread