Decimal to binary conversion of integer in C


I have this short piece of code which generally (99% of the time) works fine for converting integers from decimal to binary.

#include <stdio.h>
#include <math.h>
int main()
{
    int numDec, remainder=0, numBin=0;
    printf ("Provide an integer you want to convert!\n\n");
    scanf ("%d", &numDec);
    printf ("\n");
    while (numDec>0)
    {
        numBin += numDec % 2 * pow (10, remainder);
        numDec = numDec / 2;
        remainder += 1;
    }
    printf ("%d", numBin);
    return 0;
}

A problem emerges, though, if I use the DevC++ compiler, but not in any other case. The weird issue is that for some inputs, like 210, I get a wrong answer: instead of the desired 11010010 I get 11010009. I am not sure why this is occurring or what in the background could be causing it. I'd really like to know what is going on in DevC++ that causes this. Any help is highly appreciated!

CodePudding user response:

Edit: As noted by @Eric Postpischil, my first answer contains quite a few instances of undefined behavior. I'll leave that answer below for reference.

Here is a fixed answer:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

size_t get_uint_binary_rep(unsigned i, char (*rep)[CHAR_BIT * sizeof(unsigned) + 1])
{
    char* it = *rep;
    if (i == 0)
    {
        *it++ = '0';
    }
    else
    {
        /* Skip leading zeros */
        unsigned digitCount = CHAR_BIT * sizeof(unsigned);
        unsigned const mask = (unsigned)1 << (digitCount - 1);
        while ((i & mask) == 0)
        {
            i <<= 1;
            --digitCount;
        }
    
        while (digitCount != 0)
        {
            *it++ = '0' + ((i & mask) != 0);
            i <<= 1;
            --digitCount;
        }
    }

    *it = '\0';
    return (size_t)(it - *rep);
}

size_t get_int_binary_rep(int i, char (*rep)[CHAR_BIT * sizeof(int) + 2])
{
    size_t charCount = 0;
    unsigned u = 0;
    char* it = *rep;
    if (i < 0)
    {
        if (i == INT_MIN)
        {
            u = INT_MAX;
            ++u;
        }
        else
        {
            u = -i;
        }

        *it++ = '-';
        charCount = 1;
    }
    else
    {
        u = i;
    }

    char urep [CHAR_BIT * sizeof(unsigned) + 1] = {0};
    size_t const urepCharCount = get_uint_binary_rep(u, &urep);
    strncpy(it, urep, urepCharCount + 1);
    return charCount + urepCharCount;
}

int main()
{
    int i = 0;
    char rep[CHAR_BIT * sizeof(int) + 2] = {0};

    get_int_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);

    i = -5;
    get_int_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);

    i = 10;
    get_int_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);

    i = 210;
    get_int_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);

    i = INT_MAX;
    get_int_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);

    i = INT_MIN;
    get_int_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);
}

How about evaluating the binary representation of an int like this?

#include <stdio.h>
#include <limits.h>

void get_binary_rep(int i, char (*rep)[8 * sizeof(int) + 1])
{
    char* it = *rep;
    if (i == 0)
    {
        *it++ = '0';
    }
    else
    {
        if (i < 0)
        {
            *it++ = '-';
            i = -i;
        }
    
        /* Skip leading zeros */
        int digitCount = 8 * sizeof(int);
        int const mask = 1 << (digitCount - 1);
        while ((i & mask) == 0)
        {
            i <<= 1;
            --digitCount;
        }
    
        while (digitCount != 0)
        {
            char const ch = (i & mask) ? '1' : '0';
            *it++ = ch;
            i <<= 1;
            --digitCount;
        }
    }

    *it = '\0';
}

int main()
{
    int i = 0;
    char rep[8 * sizeof(int) + 1] = {0};

    get_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);

    i = -5;
    get_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);

    i = 10;
    get_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);

    i = 210;
    get_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep)

    i = INT_MAX;
    get_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);

    i = INT_MIN;
    get_binary_rep(i, &rep);
    printf("%d(10) = %s(2)\n", i, rep);
}

This version:

  1. Does not assume i is positive;
  2. Does not rely on pow (see the sketch after the output below);
  3. Can convert integers from INT_MIN to INT_MAX.

Output:

0(10) = 0(2)
-5(10) = -101(2)
10(10) = 1010(2)
210(10) = 11010010(2)
2147483647(10) = 1111111111111111111111111111111(2)
-2147483648(10) = -10000000000000000000000000000000(2)
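
Regarding point 2 above: the 11010009 in the question is the classic symptom of truncating a slightly inexact pow result when it is converted to int. If pow(10, 7) comes back as something like 9999999.999..., the bit at position 7 contributes 9999999 instead of 10000000, which turns the expected 11010010 into 11010009. A minimal sketch of that failure mode (whether it actually triggers depends on the C library; many implementations return an exact value here):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double p = pow(10, 7);         /* may come back as 9999999.999... on some C libraries */
    int truncated = (int)p;        /* conversion to int truncates toward zero             */
    int rounded = (int)(p + 0.5);  /* rounding first avoids the off-by-one                */
    printf("pow(10, 7) = %.10f, truncated = %d, rounded = %d\n", p, truncated, rounded);
    return 0;
}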

Note: I pass the parameter rep as a pointer to a char[33] in order to retain the information about its size. In a first version of get_binary_rep I memset all the bytes in the buffer to '\0':

static size_t const rep_size = sizeof(*rep) / sizeof((*rep)[0]);
memset(*rep, 0, rep_size);

Still, even without the call to memset, I think passing rep as a pointer to the whole array is useful, because the compiler will warn you if you pass a pointer to an array of a different size.
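
For instance, calling the function with a pointer to a wrong-sized array is then a type mismatch the compiler can diagnose, rather than silent truncation. A minimal sketch of the idea (takes_rep is just a hypothetical stand-in with the same parameter type as get_binary_rep):

void takes_rep(char (*rep)[8 * sizeof(int) + 1])
{
    (void)rep;                   /* the body is irrelevant; only the parameter type matters */
}

int main(void)
{
    char ok[8 * sizeof(int) + 1];
    char too_small[16];
    (void)too_small;

    takes_rep(&ok);              /* fine: the array sizes match */
    /* takes_rep(&too_small); */ /* uncommenting this draws a diagnostic:
                                    char (*)[16] is not compatible with char (*)[33]
                                    (assuming 32-bit int) */
    return 0;
}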

CodePudding user response:

It doesn't make sense to convert from decimal to binary, since computers only use one system internally - binary. That value can then be presented to human users in other forms such as decimal or hex. Therefore it probably makes far more sense to convert from an integer to a string holding the binary representation.

"Converting" a binary value such as 1000 (8 decimal) into the similar-looking decimal number 1000 (one thousand), which is what your code produces, is unlikely to be a useful format for any purpose. Similarly, an array of ones and zeroes is unlikely to be useful. BCD might have some uses, but that's not what you are asking for.
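
To illustrate the point about representations: the very same integer object can be shown in several human-readable forms without converting the stored value at all; a minimal sketch:

#include <stdio.h>

int main (void)
{
  unsigned int n = 210u;   /* stored in binary internally, always */
  printf("%u\n", n);       /* decimal presentation:     210       */
  printf("%x\n", n);       /* hexadecimal presentation: d2        */
  printf("%o\n", n);       /* octal presentation:       322       */
  return 0;
}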

Assuming you simply wish to display a binary number to a human user, a string conversion is what makes the most sense. Here is a simple example using 8-bit numbers:

#include <stdio.h>
#include <stdint.h>

void get_binstr (char dst[8+1], uint8_t val) // 8 chars + 1 null terminator
{
  for(size_t i=0; i<8; i++)                  // iterate over 8 bits
  {
    size_t index = 8 - 1 - i;                // bit position, counting down from 7 to 0
    uint8_t bit = (val >> index) & 1;        // mask out the relevant bit
    dst[i] = bit + '0';                      // convert to string symbol, most significant bit first
  }
  dst[8]='\0';                               // null terminate
}

int main (void)
{
  char str[8+1];
  get_binstr(str, 210);
  puts(str);
}
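
The same pattern scales directly to wider types. A minimal sketch for 32-bit values (get_binstr32 is just an illustrative name, not a standard function):

#include <stdio.h>
#include <stdint.h>

void get_binstr32 (char dst[32+1], uint32_t val) // 32 chars + 1 null terminator
{
  for(size_t i=0; i<32; i++)                     // iterate over 32 bits
  {
    size_t index = 32 - 1 - i;                   // bit position, counting down from 31 to 0
    uint32_t bit = (val >> index) & 1u;          // mask out the relevant bit
    dst[i] = (char)(bit + '0');                  // most significant bit first
  }
  dst[32] = '\0';                                // null terminate
}

int main (void)
{
  char str[32+1];
  get_binstr32(str, 210u);
  puts(str);                                     // prints 210 in binary, padded with leading zeros
}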