#ifndef _MACRO68_H_
#define _MACRO68_H_

#include "emu68/srdef68.h"
#include "emu68/excep68.h"

#ifdef __cplusplus
extern "C" {
#endif

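/* Cycle counting: ADDCYCLE/SETCYCLE update reg68.cycle only when the
 * emulator is built with EMU68CYCLE defined; otherwise they expand to
 * nothing. */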
#ifndef EMU68CYCLE
# define ADDCYCLE(N)
# define SETCYCLE(N)
#else
# define ADDCYCLE(N) reg68.cycle += (N)
# define SETCYCLE(N) reg68.cycle = (N)
#endif

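/* Generic exception entry: push PC and SR, clear the trace bit, enter
 * supervisor mode, set the interrupt priority level to LVL and load PC
 * from VECTOR. */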
#define EXCEPTION(VECTOR,LVL) \
{ \
  pushl(reg68.pc); pushw(reg68.sr); \
  reg68.sr &= 0x70FF; \
  reg68.sr |= (0x2000+((LVL)<<SR_IPL_BIT)); \
  reg68.pc = read_L(VECTOR); \
}

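/* Illegal instruction: log the faulting PC and raise the illegal
 * instruction exception. */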
#define ILLEGAL \
{\
  EMU68error_add("Illegal pc:%06x",reg68.pc); \
  EXCEPTION(ILLEGAL_VECTOR,ILLEGAL_LVL); \
}

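/* Bus error: log PC, faulting address and access mode (read/write),
 * then raise the bus error exception. */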
#define BUSERROR(ADDR,MODE) \
{\
  EMU68error_add("bus error pc:%06x addr:%06x (%c)",\
                 reg68.pc,ADDR,MODE?'W':'R');\
  EXCEPTION(BUSERROR_VECTOR,BUSERROR_LVL) \
}

#define LINEA EXCEPTION(LINEA_VECTOR,LINEA_LVL)

#define LINEF EXCEPTION(LINEF_VECTOR,LINEF_LVL)

#define TRAPV if(reg68.sr&SR_V) EXCEPTION(TRAPV_VECTOR,TRAPV_LVL)

#define TRAP(TRAP_N) EXCEPTION(TRAP_VECTOR(TRAP_N),TRAP_LVL)

#define CHK EXCEPTION(CHK_VECTOR,CHK_LVL)

#define CHKW(CHK_A,CHK_B) if((CHK_B)<0 || (CHK_B)>(CHK_A)){ CHK; }

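/* Miscellaneous instructions. STOP loads SR from the next instruction
 * word and sets reg68.status; RTE restores SR then PC, RTR restores only
 * the CCR byte. */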
#define NOP

#define RESET EMU68_reset()

#define STOP reg68.sr = (u16)get_nextw(); reg68.status = 1

#define RTS reg68.pc = popl()

#define RTE reg68.sr = popw(); RTS

#define RTR reg68.sr = (reg68.sr&0xFF00) | (u8)popw(); RTS

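/* NBCD is not really emulated: the operand is copied unchanged.
 * EXG swaps two registers with the XOR trick, EXTW/EXTL sign-extend
 * byte->word and word->long, TAS tests the operand then sets its MSB. */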
#define NBCDB(NBCD_S,NBCD_A) (NBCD_S)=(NBCD_A)

#define EXG(A,B) (A)^=(B); (B)^=(A); (A)^=(B)

#define EXTW(D) (D) = ((D)&0xFFFF0000) | ((u16)(s32)(s8)(D))

#define EXTL(D) (D) = (s32)(s16)(D)

#define TAS(TAS_A) { TSTB(TAS_A,TAS_A); (TAS_A) |= 0x80000000; }

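/* CLR reads the operand before clearing it, sets Z and clears N, V and C;
 * X is unaffected. */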
#define CLR(CLR_S,CLR_A) \
{\
  (CLR_A) = (CLR_A); \
  reg68.sr = (reg68.sr&~(SR_N|SR_V|SR_C)) | SR_Z;\
  CLR_S = 0;\
}

#define CLRB(A,B) CLR(A,B)

#define CLRW(A,B) CLR(A,B)

#define CLRL(A,B) CLR(A,B)

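/* LINK pushes the frame register, copies the stack pointer into it and
 * adds the displacement word fetched from the instruction stream to A7;
 * UNLK undoes it. */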
#define LINK(R_LNK) \
  pushl(reg68.a[R_LNK]); \
  reg68.a[R_LNK] = reg68.a[7]; \
  reg68.a[7] += get_nextw()

#define UNLK(R_LNK) \
  reg68.a[7]=reg68.a[R_LNK]; \
  reg68.a[R_LNK]=popl()

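/* SWAP exchanges the two 16-bit halves of a data register and updates
 * N and Z; V and C are cleared. */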
#define SWAP(SWP_A) \
{ \
  (SWP_A) = ((u32)(SWP_A)>>16) | ((SWP_A)<<16); \
  reg68.sr = (reg68.sr&~(SR_V|SR_C|SR_Z|SR_N)) | \
             ((!(SWP_A))<<SR_Z_BIT) | \
             (((s32)(SWP_A)>>31)&SR_N); \
}

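/* Older versions of the bit-instruction macros, disabled and kept for
 * reference only. */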
#if 0

#define BTST(V,BIT) \
  reg68.sr = (reg68.sr&(~SR_Z)) | ((((V)&(1<<(BIT)))==0)<<SR_Z_BIT)

#define BSET(V,BIT) BTST(V,BIT); (V) |= (1<<(BIT));

#define BCLR(V,BIT) BTST(V,BIT); (V) &= ~(1<<(BIT));

#define BCHG(V,BIT) BTST(V,BIT); (V) ^= (1<<(BIT));

#endif

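/* Bit instructions: Z is set from the state of the tested bit before it
 * is modified (Z=1 when the bit was clear). */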
#define BTST(V,BIT) \
  reg68.sr = (reg68.sr&(~SR_Z)) | (((((V)>>(BIT))&1)^1)<<SR_Z_BIT)

#define BSET(V,BIT) \
  if( (V)&(1<<(BIT)) ) { reg68.sr &= ~SR_Z; }\
  else { (V) |= 1<<(BIT); reg68.sr |= SR_Z; }

#define BCLR(V,BIT) \
  if( (V)&(1<<(BIT)) ) { (V) &= ~(1<<(BIT)); reg68.sr &= ~SR_Z; }\
  else { reg68.sr |= SR_Z; }

#define BCHG(V,BIT) \
  if( (V)&(1<<(BIT)) ) { (V) &= ~(1<<(BIT)); reg68.sr &= ~SR_Z; }\
  else { (V) |= 1<<(BIT); reg68.sr |= SR_Z; }

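/* MOVE/TST flag update: N and Z are set from the value, V and C are
 * cleared, X is preserved. */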
#define MOVE(MOV_A) reg68.sr = (reg68.sr&(0xFF00 | SR_X)) \
  | (((MOV_A)==0)<<SR_Z_BIT) | (((s32)(MOV_A)>>31)&SR_N);
#define TST(TST_V) MOVE(TST_V)
#define TSTB(TST_S,TST_A) { TST_S=TST_A; TST(TST_S); }
#define TSTW(TST_S,TST_A) { TST_S=TST_A; TST(TST_S); }
#define TSTL(TST_S,TST_A) { TST_S=TST_A; TST(TST_S); }

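/* Multiply and divide: implemented by external helper functions; flag
 * handling is left to those helpers. */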
#define MULSW(MUL_S, MUL_A, MUL_B) MUL_S = muls68(MUL_A, MUL_B)

#define MULUW(MUL_S, MUL_A, MUL_B) MUL_S = mulu68(MUL_A, MUL_B)

#define DIVSW(DIV_S, DIV_A, DIV_B) DIV_S = divs68(DIV_A, DIV_B)

#define DIVUW(DIV_S, DIV_A, DIV_B) DIV_S = divu68(DIV_A, DIV_B)

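/* Logical operations (AND, OR, EOR, NOT): the byte, word and long
 * variants all map to the same helper functions. */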
#define AND(AND_S, AND_A, AND_B) AND_S = and68(AND_A, AND_B)

#define ANDB(AND_S, AND_A, AND_B) AND(AND_S, AND_A, AND_B)

#define ANDW(AND_S, AND_A, AND_B) AND(AND_S, AND_A, AND_B)

#define ANDL(AND_S, AND_A, AND_B) AND(AND_S, AND_A, AND_B)


#define ORR(ORR_S, ORR_A, ORR_B) ORR_S = orr68(ORR_A, ORR_B)

#define ORB(ORR_S, ORR_A, ORR_B) ORR(ORR_S, ORR_A, ORR_B)

#define ORW(ORR_S, ORR_A, ORR_B) ORR(ORR_S, ORR_A, ORR_B)

#define ORL(ORR_S, ORR_A, ORR_B) ORR(ORR_S, ORR_A, ORR_B)


#define EOR(EOR_S, EOR_A, EOR_B) EOR_S = eor68(EOR_A, EOR_B)

#define EORB(EOR_S, EOR_A, EOR_B) EOR(EOR_S, EOR_A, EOR_B)

#define EORW(EOR_S, EOR_A, EOR_B) EOR(EOR_S, EOR_A, EOR_B)

#define EORL(EOR_S, EOR_A, EOR_B) EOR(EOR_S, EOR_A, EOR_B)


#define NOT(NOT_S,NOT_A) NOT_S = not68(NOT_A)

#define NOTB(A,B) NOT(A,B)

#define NOTW(A,B) NOT(A,B)

#define NOTL(A,B) NOT(A,B)

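/* Arithmetic: ADD/SUB/CMP are implemented by helper functions taking an
 * explicit X (extend) input.  The ADDX/SUBX variants shift the X flag up
 * to the bit position of the operand, since byte and word operands are
 * kept in the upper bits of the 32-bit registers. */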
#define ADD(ADD_S,ADD_A,ADD_B,ADD_X) ADD_S=add68(ADD_A,ADD_B,ADD_X)
#define SUB(SUB_S,SUB_A,SUB_B,SUB_X) SUB_S=sub68(SUB_B,SUB_A,SUB_X)
#define CMP(SUB_A,SUB_B) sub68(SUB_B,SUB_A,0)

#define ADDB(ADD_S, ADD_A, ADD_B) ADD(ADD_S, ADD_A, ADD_B,0)
#define ADDW(ADD_S, ADD_A, ADD_B) ADD(ADD_S, ADD_A, ADD_B,0)
#define ADDL(ADD_S, ADD_A, ADD_B) ADD(ADD_S, ADD_A, ADD_B,0)
#define ADDXB(ADD_S, ADD_A, ADD_B) \
  ADD(ADD_S, ADD_A, ADD_B, (reg68.sr&SR_X)<<(24-SR_X_BIT))
#define ADDXW(ADD_S, ADD_A, ADD_B) \
  ADD(ADD_S, ADD_A, ADD_B, (reg68.sr&SR_X)<<(16-SR_X_BIT))
#define ADDXL(ADD_S, ADD_A, ADD_B) \
  ADD(ADD_S, ADD_A, ADD_B, (reg68.sr&SR_X)>>SR_X_BIT)

#define ADDA(ADD_S, ADD_A, ADD_B) (ADD_S) = (ADD_A) + (ADD_B)
#define ADDAW(ADD_S, ADD_A, ADD_B) ADDA(ADD_S, (ADD_A)>>16, ADD_B)
#define ADDAL(ADD_S, ADD_A, ADD_B) ADDA(ADD_S, ADD_A, ADD_B)

#define SUBB(SUB_S, SUB_A, SUB_B) SUB(SUB_S, SUB_A, SUB_B,0)
#define SUBW(SUB_S, SUB_A, SUB_B) SUB(SUB_S, SUB_A, SUB_B,0)
#define SUBL(SUB_S, SUB_A, SUB_B) SUB(SUB_S, SUB_A, SUB_B,0)

#define SUBXB(SUB_S, SUB_A, SUB_B) \
  SUB(SUB_S, SUB_A, SUB_B, (reg68.sr&SR_X)<<(24-SR_X_BIT))
#define SUBXW(SUB_S, SUB_A, SUB_B) \
  SUB(SUB_S, SUB_A, SUB_B, (reg68.sr&SR_X)<<(16-SR_X_BIT))
#define SUBXL(SUB_S, SUB_A, SUB_B) \
  SUB(SUB_S, SUB_A, SUB_B, (reg68.sr&SR_X)>>SR_X_BIT)

#define SUBA(SUB_S, SUB_A, SUB_B) (SUB_S) = (SUB_B) - (SUB_A)
#define SUBAW(SUB_S, SUB_A, SUB_B) \
{\
  s32 ZOB = (SUB_A)>>16;\
  SUBA(SUB_S, ZOB, SUB_B);\
}
#define SUBAL(SUB_S, SUB_A, SUB_B) SUBA(SUB_S, SUB_A, SUB_B)

#define CMPB(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPW(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPL(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPA(CMP_A, CMP_B) CMP(CMP_A, CMP_B)
#define CMPAW(CMP_A, CMP_B) \
{\
  s32 ZOB = (CMP_A)>>16;\
  CMPA( ZOB, CMP_B);\
}
#define CMPAL(CMP_A, CMP_B) CMP(CMP_A, CMP_B)

#define NEGB(NEG_S,NEG_A) SUBB(NEG_S,NEG_A,0)
#define NEGW(NEG_S,NEG_A) SUBW(NEG_S,NEG_A,0)
#define NEGL(NEG_S,NEG_A) SUBL(NEG_S,NEG_A,0)

#define NEGXB(NEG_S,NEG_A) SUBXB(NEG_S,NEG_A,0)
#define NEGXW(NEG_S,NEG_A) SUBXW(NEG_S,NEG_A,0)
#define NEGXL(NEG_S,NEG_A) SUBXL(NEG_S,NEG_A,0)

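/* Shift macros.  Operands live in the upper bits of a 32-bit register,
 * so the byte and word variants mask with 0xFF000000/0xFFFF0000.  Note
 * that ASR/ASL are aliased to the logical shifts here. */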
#define LSR(LSR_A,LSR_D,LSR_MSK,LSR_C) \
{\
  reg68.sr &= 0xFF00;\
  if((LSR_D)!=0) \
  {\
    ADDCYCLE(2*(LSR_D));\
    (LSR_A) >>= (LSR_D)-1;\
    if((LSR_A)&(LSR_C)) reg68.sr |= SR_X | SR_C;\
    (LSR_A)>>=1;\
  }\
  (LSR_A) &= (LSR_MSK);\
  reg68.sr |= (((LSR_A)==0)<<SR_Z_BIT) | (((s32)(LSR_A)<0)<<SR_N_BIT);\
}

#define LSRB(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFF000000,(1<<24))

#define LSRW(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFF0000,(1<<16))

#define LSRL(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFFFFFF,(1<<0))

#define ASRB(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFF000000,(1<<24))

#define ASRW(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFF0000,(1<<16))

#define ASRL(LSR_A,LSR_B) LSR(LSR_A,LSR_B,0xFFFFFFFF,(1<<0))

#define LSL(LSL_A,LSL_D,LSL_MSK) \
{\
  reg68.sr &= 0xFF00;\
  if((LSL_D)!=0) \
  {\
    ADDCYCLE(2*(LSL_D));\
    (LSL_A) <<= (LSL_D)-1;\
    if((LSL_A)&0x80000000) reg68.sr |= SR_X | SR_C;\
    (LSL_A)<<=1;\
  }\
  (LSL_A) &= (LSL_MSK);\
  reg68.sr |= (((LSL_A)==0)<<SR_Z_BIT) | (((s32)(LSL_A)<0)<<SR_N_BIT);\
}

#define LSLB(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFF000000)

#define LSLW(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFF0000)

#define LSLL(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFFFFFF)

#define ASLB(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFF000000)

#define ASLW(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFF0000)

#define ASLL(LSL_A,LSL_B) LSL(LSL_A,LSL_B,0xFFFFFFFF)

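/* Rotate macros (ROR/ROL): C receives the last bit rotated out, X is
 * unaffected, and the rotate count is taken modulo the operand size. */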
#define ROR(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
  reg68.sr &= 0xFF00 | SR_X;\
  if((ROR_D)!=0) \
  {\
    ADDCYCLE(2*(ROR_D));\
    ROR_D &= (ROR_SZ)-1;\
    if((ROR_A)&(1<<((ROR_D)-1+32-(ROR_SZ)))) reg68.sr |= SR_C;\
    (ROR_A) &= (ROR_MSK);\
    (ROR_A) = ((ROR_A)>>(ROR_D)) + ((ROR_A)<<((ROR_SZ)-(ROR_D)));\
  }\
  (ROR_A) &= (ROR_MSK);\
  reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

#define ROL(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
  reg68.sr &= 0xFF00 | SR_X;\
  if((ROR_D)!=0) \
  {\
    ADDCYCLE(2*(ROR_D));\
    ROR_D &= (ROR_SZ)-1;\
    if((ROR_A)&(1<<(32-(ROR_D)))) reg68.sr |= SR_C;\
    (ROR_A) &= (ROR_MSK);\
    (ROR_A) = ((ROR_A)<<(ROR_D)) + ((ROR_A)>>((ROR_SZ)-(ROR_D)));\
  }\
  (ROR_A) &= (ROR_MSK);\
  reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

#define RORB(ROR_A,ROR_B) ROR(ROR_A,ROR_B,0xFF000000,8)
#define RORW(ROR_A,ROR_B) ROR(ROR_A,ROR_B,0xFFFF0000,16)
#define RORL(ROR_A,ROR_B) ROR(ROR_A,ROR_B,0xFFFFFFFF,32)
#define ROLB(ROR_A,ROR_B) ROL(ROR_A,ROR_B,0xFF000000,8)
#define ROLW(ROR_A,ROR_B) ROL(ROR_A,ROR_B,0xFFFF0000,16)
#define ROLL(ROR_A,ROR_B) ROL(ROR_A,ROR_B,0xFFFFFFFF,32)

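/* Rotate-through-X macros (ROXR/ROXL): the previous X flag is rotated
 * into the value and both X and C receive the last bit shifted out. */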
#define ROXR(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
  u32 ROR_X = (reg68.sr>>SR_X_BIT)&1;\
  reg68.sr &= 0xFF00;\
  if((ROR_D)!=0) \
  {\
    ADDCYCLE(2*(ROR_D));\
    ROR_D &= (ROR_SZ)-1;\
    if((ROR_A)&(1<<((ROR_D)-1+32-(ROR_SZ)))) reg68.sr |= SR_C | SR_X;\
    (ROR_A) &= (ROR_MSK);\
    (ROR_A) = ((ROR_A)>>(ROR_D)) + ((ROR_A)<<((ROR_SZ)-(ROR_D)+1));\
    (ROR_A) |= (ROR_X)<<(32-(ROR_D));\
  }\
  (ROR_A) &= (ROR_MSK);\
  reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

#define ROXL(ROR_A,ROR_D,ROR_MSK,ROR_SZ) \
{\
  u32 ROR_X = (reg68.sr>>SR_X_BIT)&1;\
  reg68.sr &= 0xFF00;\
  if((ROR_D)!=0) \
  {\
    ADDCYCLE(2*(ROR_D));\
    ROR_D &= (ROR_SZ)-1;\
    if((ROR_A)&(1<<(32-(ROR_D)))) reg68.sr |= SR_C | SR_X;\
    (ROR_A) &= (ROR_MSK);\
    (ROR_A) = ((ROR_A)<<(ROR_D)) + ((ROR_A)>>((ROR_SZ)-(ROR_D)+1));\
    (ROR_A) |= (ROR_X)<<((ROR_D)-1+(32-(ROR_SZ)));\
  }\
  (ROR_A) &= (ROR_MSK);\
  reg68.sr |= (((ROR_A)==0)<<SR_Z_BIT) | (((s32)(ROR_A)<0)<<SR_N_BIT);\
}

#define ROXRB(ROR_A,ROR_B) ROXR(ROR_A,ROR_B,0xFF000000,8)
#define ROXRW(ROR_A,ROR_B) ROXR(ROR_A,ROR_B,0xFFFF0000,16)
#define ROXRL(ROR_A,ROR_B) ROXR(ROR_A,ROR_B,0xFFFFFFFF,32)
#define ROXLB(ROR_A,ROR_B) ROXL(ROR_A,ROR_B,0xFF000000,8)
#define ROXLW(ROR_A,ROR_B) ROXL(ROR_A,ROR_B,0xFFFF0000,16)
#define ROXLL(ROR_A,ROR_B) ROXL(ROR_A,ROR_B,0xFFFFFFFF,32)

#ifdef __cplusplus
}
#endif

#endif