
javascript - Converting a decimal value to a 32-bit floating-point hexadecimal

For a simple utility I'm working on, I need a script that converts a given decimal value to a 32-bit floating-point hexadecimal value. For example, I know that 1 is 3F800000 and 100 is 42C80000, but I don't know how to produce these results for an arbitrary number. If somebody knows a simple formula or even a complex way to go about doing this, please share.



1 Reply


I don't know if I got the corner cases right, but anyway, here is some code:

function floatToIntBits(f) {
    var NAN_BITS = 0|0x7FC00000;
    var INF_BITS = 0|0x7F800000;
    var ZERO_BITS = 0|0x00000000;
    var SIGN_BIT = 0|0x80000000;
    var EXP_MASK = 0|0x7F800000;
    var MANT_MASK = 0|0x007FFFFF;

    // NaN is the only value that compares unequal to itself.
    if (f != f)
        return NAN_BITS;

    // Math.pow(f, -1) yields -Infinity for -0.0, which is how the
    // sign of a zero is recovered.
    var signBit = (f > 0.0 || (f == 0.0 && Math.pow(f, -1) > 0)) ? 0 : SIGN_BIT;
    var fabs = Math.abs(f);
    if (fabs == Number.POSITIVE_INFINITY)
        return signBit | INF_BITS;
    if (fabs == 0.0)
        return signBit | ZERO_BITS;

    // Recover the binary exponent by counting how many halvings it
    // takes until the 64-bit double underflows to zero, one step past
    // its minimum subnormal value 2^-(1023 + 52 - 1).
    var e = 0, x = f;
    while (x != 0.0) {
        e++;
        x /= 2.0;
    }

    var exp = e - (1023 + 52);
    if (exp >= 127) // XXX: maybe incorrect
        return signBit | INF_BITS;
    if (exp <= -126) // XXX: maybe incorrect
        return signBit | ZERO_BITS;

    // Exact powers of two hit ceil exactly; every other value
    // overshoots the exponent by one, hence the exp-- below.
    var ceil = Math.pow(2.0, exp);
    //console.log("fabs", fabs, "ceil", ceil);
    var mantissa = fabs / ceil * Math.pow(2.0, 24);
    if (fabs == ceil) {
        mantissa = 0;
    } else {
        exp--;
    }
    // The mask drops the implicit leading 1 bit of the mantissa.
    var expBits = ((exp + 127) << 23) & EXP_MASK;
    var mantissaBits = mantissa & MANT_MASK;

    //console.log("sign", signBit, "expBits", expBits.toString(16), "mantissaBits", mantissaBits.toString(16));
    return signBit | expBits | mantissaBits;
}

function testCase(expected, f) {
    var actual = floatToIntBits(f);
    if (expected !== actual) {
        console.log("expected", expected.toString(16), "actual", actual.toString(16), "f", f);
    }
}

testCase(0|0x80000000, -0.0);
testCase(0|0x00000000, 0.0);
testCase(0|0x3F800000, 1.0);
testCase(0|0x42C80000, 100.0);
testCase(0|0x7FC00000, 0.0 / 0.0);  // NaN
testCase(0|0x7F800000, 1.0 / 0.0);  // Inf
testCase(0|0xFF800000, 1.0 / -0.0); // -Inf

The funny-looking 0|0x... expressions are necessary because JavaScript treats these numeric literals as large positive numbers, but applying a bitwise operator converts them to signed 32-bit integers. (Compare the ECMAScript specification, section 8.5, last paragraph.)
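
Going the other way, to get the hexadecimal string the original question asks for, the signed result can be reinterpreted as unsigned with >>> 0 before formatting. Here is a minimal sketch; floatToHex is a wrapper name introduced for illustration, not part of the code above:

function floatToHex(f) {
    // ">>> 0" reinterprets the signed 32-bit result as an unsigned
    // value, so negative bit patterns format correctly.
    var bits = floatToIntBits(f) >>> 0;
    // Left-pad to eight hex digits, e.g. 1 -> "3F800000".
    return ("00000000" + bits.toString(16).toUpperCase()).slice(-8);
}

console.log(floatToHex(1.0));   // "3F800000"
console.log(floatToHex(100.0)); // "42C80000"
console.log(floatToHex(-0.0));  // "80000000"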

Update: The following code is based on the code above, but it follows the actual wording of the specification more closely. Additionally, it is independent of the particular floating-point type used to implement JavaScript's Number. The code first scales the value into the interval [1.0, 2.0), since this is the representation that IEEE 754-1985 uses for normalized numbers. This code also handles denormalized numbers correctly, and all the operations it uses are defined in IEEE 754-1985 and are exact, that is, they do not lose precision.
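
As a concrete trace of that normalization (a worked example, not part of the code below): for 100.0 the loops end with x = 1.5625 and exp = 6, so biasedExp = 133 and the mantissa is (1.5625 - 1.0) * 2^23 = 0x480000:

// 100.0 == 1.5625 * 2^6
//   biasedExp = 6 + 127 = 133
//   mantissa  = (1.5625 - 1.0) * Math.pow(2.0, 23) = 0x480000
console.log(((133 << 23) | 0x480000).toString(16)); // "42c80000"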

function assert(cond, msg, arg0) {
    if (!cond)
        console.log("error", msg, arg0);
}

function floatToIntBits(f) {
    var NAN_BITS = 0|0x7FC00000;
    var INF_BITS = 0|0x7F800000;
    var ZERO_BITS = 0|0x00000000;
    var SIGN_MASK = 0|0x80000000;
    var EXP_MASK = 0|0x7F800000;
    var MANT_MASK = 0|0x007FFFFF;
    var MANT_MAX = Math.pow(2.0, 23) - 1.0;

    // NaN is the only value that compares unequal to itself.
    if (f != f)
        return NAN_BITS;
    // 1.0 / -0.0 is -Infinity, which distinguishes -0.0 from +0.0.
    var hasSign = f < 0.0 || (f == 0.0 && 1.0 / f < 0);
    var signBits = hasSign ? SIGN_MASK : 0;
    var fabs = Math.abs(f);

    if (fabs == Number.POSITIVE_INFINITY)
        return signBits | INF_BITS;

    // Normalize fabs to x * 2^exp with x in [1.0, 2.0); the bounds on
    // exp stop the loops for values outside the float range (overflow)
    // and for denormalized results (underflow).
    var exp = 0, x = fabs;
    while (x >= 2.0 && exp <= 127) {
        exp++;
        x /= 2.0;
    }
    while (x < 1.0 && exp >= -126) {
        exp--;
        x *= 2.0;
    }
    // Multiplying and dividing by powers of two is exact, so the
    // invariant holds without any loss of precision.
    assert(x * Math.pow(2.0, exp) == fabs, "fabs");
    var biasedExp = exp + 127;
    assert(0 <= biasedExp && biasedExp <= 255, "biasedExp in [0, 255]", biasedExp);

    if (biasedExp == 255) // exponent overflow: too large for a float
        return signBits | INF_BITS;
    if (biasedExp == 0) {
        // Denormalized: the loop overshoots the exponent by one, so x
        // carries an extra factor of two that the division removes.
        assert(0.0 <= x && x < 2.0, "x in [0.0, 2.0)", x);
        var mantissa = x * Math.pow(2.0, 23) / 2.0;
    } else {
        // Normalized: drop the implicit leading 1 bit.
        assert(1.0 <= x && x < 2.0, "x in [1.0, 2.0)", x);
        var mantissa = x * Math.pow(2.0, 23) - Math.pow(2.0, 23);
    }
    assert(0.0 <= mantissa && mantissa <= MANT_MAX, "mantissa in [0.0, 2^23)", mantissa);

    //console.log("number", f, "x", x, "biasedExp", biasedExp, "mantissa", mantissa.toString(16));
    var expBits = (biasedExp << 23) & EXP_MASK;
    var mantissaBits = mantissa & MANT_MASK;

    //console.log("number", f, "sign", signBits.toString(16), "expBits", expBits.toString(16), "mantissaBits", mantissaBits.toString(16));
    return signBits | expBits | mantissaBits;
}

function testCase(expected, f) {
    var actual = floatToIntBits(f);
    if (expected !== actual) {
        console.log("error", "number", f, "expected", expected.toString(16), "got", actual.toString(16));
    }
}

testCase(0|0xFF800000, 1.0 / -0.0); // -Inf
testCase(0|0xBF800000, -1.0);
testCase(0|0x80000000, -0.0);
testCase(0|0x00000000, 0.0);
testCase(0|0x00000001, Math.pow(2.0, -(126 + 23))); // minimum denormalized
testCase(0|0x007FFFFF, Math.pow(2.0, -126) - Math.pow(2.0, -(126 + 23))); // maximum denormalized
testCase(0|0x00800000, Math.pow(2.0, -126)); // minimum normalized float
testCase(0|0x3F800000, 1.0);
testCase(0|0x42C80000, 100.0);
testCase(0|0x7F800000, 1.0 / 0.0); // Inf
testCase(0|0x7FC00000, 0.0 / 0.0); // NaN
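
For completeness, in environments with typed arrays the same bit pattern can be read straight from memory instead of being computed. This is a sketch, not part of the answer above; it assumes DataView support, which every current browser and Node.js provides:

function floatToIntBitsTyped(f) {
    var view = new DataView(new ArrayBuffer(4));
    // Store the value as a 32-bit float, then read the same four
    // bytes back as a signed 32-bit integer. DataView uses an
    // explicit byte order (big-endian by default), so the result
    // does not depend on the host CPU's endianness.
    view.setFloat32(0, f);
    return view.getInt32(0);
}

console.log((floatToIntBitsTyped(100.0) >>> 0).toString(16)); // "42c80000"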
