path: root/vm/compiler/template/mips/TEMPLATE_CMPL_DOUBLE_VFP.S
%default { "naninst":"li     rTEMP, -1" }
%verify "executed"
%verify "basic lt, gt, eq */
%verify "left arg NaN"
%verify "right arg NaN"
    /*
     * Compare two double-precision floating-point values.  Puts 0, 1, or -1
     * into the destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * On entry:
     *    a0 = &op1 [vBB]
     *    a1 = &op2 [vCC]
     *
     * for: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */

    /* "clasic" form */
#ifdef  SOFT_FLOAT
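    /*
     * Soft-float build: the compares go through the libgcc soft-float
     * helpers, with the two doubles passed in a0/a1 and a2/a3 per the
     * O32 ABI.  Per the libgcc runtime conventions, __eqdf2 returns 0
     * in v0 only when the operands are ordered and equal, __ltdf2
     * returns a negative value only when a < b, and __gtdf2 returns a
     * positive value only when a > b.  When either operand is NaN none
     * of those conditions hold, so none of the branches below is taken
     * and control falls through to the naninst case.
     */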
    move rOBJ, a0                       # save a0
    move rBIX, a1                       # save a1
    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
    JAL(__eqdf2)                        # v0<- (vBB == vCC)
    li       rTEMP, 0                   # vAA<- 0
    beqz     v0, ${opcode}_finish
    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
    JAL(__ltdf2)                        # v0<- (vBB < vCC)
    li       rTEMP, -1                  # vAA<- -1
    bltz     v0, ${opcode}_finish
    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1<- vBB/vBB+1
    LOAD64(rARG2, rARG3, rBIX)          # a2/a3<- vCC/vCC+1
    JAL(__gtdf2)                        # v0<- (vBB > vCC)
    li       rTEMP, 1                   # vAA<- 1
    bgtz    v0, ${opcode}_finish
#else
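    /*
     * Hard-float build: c.olt.d and c.eq.d are IEEE "ordered" compares,
     * so each condition evaluates false when either operand is NaN.  If
     * all three tests fail, control falls through to the naninst case.
     */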
    LOAD64_F(ft0, ft0f, a0)             # ft0<- vBB
    LOAD64_F(ft1, ft1f, a1)             # ft1<- vCC
    c.olt.d     fcc0, ft0, ft1          # Is ft0 < ft1
    li          rTEMP, -1
    bc1t        fcc0, ${opcode}_finish
    c.olt.d     fcc0, ft1, ft0
    li          rTEMP, 1
    bc1t        fcc0, ${opcode}_finish
    c.eq.d      fcc0, ft0, ft1
    li          rTEMP, 0
    bc1t        fcc0, ${opcode}_finish
#endif

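    /*
     * "$naninst" is filled in by the template-generation scripts.  The
     * %default above makes this the cmpl variant (NaN => -1); the cmpg
     * variant would %include this file with "naninst":"li rTEMP, 1"
     * (assumed here from the cmpl/cmpg pattern used by these templates).
     */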
    $naninst

${opcode}_finish:
    move     v0, rTEMP                  # v0<- vAA
    RETURN