各言語での整数型の最大値と最小値
唐突に、各言語での整数型の最大値と最小値をまとめてみようと思ったメモ。
環境
手元にあるものということで、環境は以下のものに限定する。なお、32ビット環境は、このために急きょ作った。
- CentOS 6 (32ビット)
- Java (openjdk version "1.8.0_151")
- C (gcc (GCC) 4.4.7)
-std=gnu99
でコンパイル
- C++ (g++ (GCC) 4.4.7)
-std=gnu++0x
でコンパイル
- PHP (PHP 5.3.3 (cli))
- Python 2 (Python 2.6.6)
- Python 3 (Python 3.6.3)
- ソースからビルドしたもの
- Ruby (ruby 1.8.7 (2013-06-27 patchlevel 374))
- Perl (v5.10.1)
- Go (go version go1.7.6 linux/386)
- bash (4.1.2(2)-release)
- CentOS 7 (64ビット)
以下、検証に使ったソースと、実行結果(32ビット環境と64ビット環境のそれぞれの実行結果のsdiff)と、補足事項を各言語ごとに記載していく。
どの言語においても、概ね以下の形で出力を出している。
- 最大値を求めて出力
- 最大値に+1して、循環することの確認
- 最小値を求めて出力
- 最小値に-1して、循環することの確認
Java
// Prints the max/min constant of each signed Java integer primitive,
// then increments/decrements past the limit to show the wrap-around.
public class Main {
    public static void main(String[] args) {
        {
            byte v = Byte.MAX_VALUE;
            System.out.println("byte:max = " + v);
            ++v;
            System.out.println(" +1 = " + v);
            v = Byte.MIN_VALUE;
            System.out.println("byte:min = " + v);
            --v;
            System.out.println(" -1 = " + v);
        }
        {
            short v = Short.MAX_VALUE;
            System.out.println("short:max = " + v);
            ++v;
            System.out.println(" +1 = " + v);
            v = Short.MIN_VALUE;
            System.out.println("short:min = " + v);
            --v;
            System.out.println(" -1 = " + v);
        }
        {
            int v = Integer.MAX_VALUE;
            System.out.println("int:max = " + v);
            ++v;
            System.out.println(" +1 = " + v);
            v = Integer.MIN_VALUE;
            System.out.println("int:min = " + v);
            --v;
            System.out.println(" -1 = " + v);
        }
        {
            long v = Long.MAX_VALUE;
            System.out.println("long:max = " + v);
            ++v;
            System.out.println(" +1 = " + v);
            v = Long.MIN_VALUE;
            System.out.println("long:min = " + v);
            --v;
            System.out.println(" -1 = " + v);
        }
    }
}
===32bit=== ===64bit=== byte:max = 127 byte:max = 127 +1 = -128 +1 = -128 byte:min = -128 byte:min = -128 -1 = 127 -1 = 127 short:max = 32767 short:max = 32767 +1 = -32768 +1 = -32768 short:min = -32768 short:min = -32768 -1 = 32767 -1 = 32767 int:max = 2147483647 int:max = 2147483647 +1 = -2147483648 +1 = -2147483648 int:min = -2147483648 int:min = -2147483648 -1 = 2147483647 -1 = 2147483647 long:max = 9223372036854775807 long:max = 9223372036854775807 +1 = -9223372036854775808 +1 = -9223372036854775808 long:min = -9223372036854775808 long:min = -9223372036854775808 -1 = 9223372036854775807 -1 = 9223372036854775807
さすがにJavaは、環境によって最大値や最小値が変わることはない。
C
/*
 * Prints the max/min of each C integer type (constants from <limits.h>),
 * then steps one past the limit to show the wrap-around this environment
 * produces.  NOTE(review): signed overflow is undefined behaviour in ISO C;
 * the clean wrap seen in the recorded output is gcc/x86-specific.
 */
#include <stdio.h>
#include <limits.h>

int main(int argc, char** argv)
{
    {   /* short */
        short v = SHRT_MAX;
        printf("short:max = %d\n", v);
        ++v;
        printf(" +1 = %d\n", v);
        v = SHRT_MIN;
        printf("short:min = %d\n", v);
        --v;
        printf(" -1 = %d\n", v);
    }
    {   /* unsigned short */
        unsigned short v = USHRT_MAX;
        printf("ushort:max = %u\n", v);
        ++v;
        printf(" +1 = %u\n", v);
        v = 0;
        printf("ushort:min = %u\n", v);
        --v;
        printf(" -1 = %u\n", v);
    }
    {   /* int */
        int v = INT_MAX;
        printf("int:max = %d\n", v);
        ++v;
        printf(" +1 = %d\n", v);
        v = INT_MIN;
        printf("int:min = %d\n", v);
        --v;
        printf(" -1 = %d\n", v);
    }
    {   /* unsigned int */
        unsigned int v = UINT_MAX;
        printf("uint:max = %u\n", v);
        ++v;
        printf(" +1 = %u\n", v);
        v = 0;
        printf("uint:min = %u\n", v);
        --v;
        printf(" -1 = %u\n", v);
    }
    {   /* long: 32 bits on ILP32, 64 bits on LP64 (the article's point) */
        long v = LONG_MAX;
        printf("long:max = %ld\n", v);
        ++v;
        printf(" +1 = %ld\n", v);
        v = LONG_MIN;
        printf("long:min = %ld\n", v);
        --v;
        printf(" -1 = %ld\n", v);
    }
    {   /* unsigned long */
        unsigned long v = ULONG_MAX;
        printf("ulong:max = %lu\n", v);
        ++v;
        printf(" +1 = %lu\n", v);
        v = 0;
        printf("ulong:min = %lu\n", v);
        --v;
        printf(" -1 = %lu\n", v);
    }
    {   /* long long */
        long long v = LLONG_MAX;
        printf("long long:max = %lld\n", v);
        ++v;
        printf(" +1 = %lld\n", v);
        v = LLONG_MIN;
        printf("long long:min = %lld\n", v);
        --v;
        printf(" -1 = %lld\n", v);
    }
    {   /* unsigned long long */
        unsigned long long v = ULLONG_MAX;
        printf("ulong long:max = %llu\n", v);
        ++v;
        printf(" +1 = %llu\n", v);
        v = 0;
        printf("ulong long:min = %llu\n", v);
        --v;
        printf(" -1 = %llu\n", v);
    }
    return 0;
}
===32bit=== ===64bit=== short:max = 32767 short:max = 32767 +1 = -32768 +1 = -32768 short:min = -32768 short:min = -32768 -1 = 32767 -1 = 32767 ushort:max = 65535 ushort:max = 65535 +1 = 0 +1 = 0 ushort:min = 0 ushort:min = 0 -1 = 65535 -1 = 65535 int:max = 2147483647 int:max = 2147483647 +1 = -2147483648 +1 = -2147483648 int:min = -2147483648 int:min = -2147483648 -1 = 2147483647 -1 = 2147483647 uint:max = 4294967295 uint:max = 4294967295 +1 = 0 +1 = 0 uint:min = 0 uint:min = 0 -1 = 4294967295 -1 = 4294967295 long:max = 2147483647 | long:max = 9223372036854775807 +1 = -2147483648 | +1 = -9223372036854775808 long:min = -2147483648 | long:min = -9223372036854775808 -1 = 2147483647 | -1 = 9223372036854775807 ulong:max = 4294967295 | ulong:max = 18446744073709551615 +1 = 0 +1 = 0 ulong:min = 0 ulong:min = 0 -1 = 4294967295 | -1 = 18446744073709551615 long long:max = 9223372036854775807 long long:max = 9223372036854775807 +1 = -9223372036854775808 +1 = -9223372036854775808 long long:min = -9223372036854775808 long long:min = -9223372036854775808 -1 = 9223372036854775807 -1 = 9223372036854775807 ulong long:max = 18446744073709551615 ulong long:max = 18446744073709551615 +1 = 0 +1 = 0 ulong long:min = 0 ulong long:min = 0 -1 = 18446744073709551615 -1 = 18446744073709551615
違いが出たのはlong/unsigned longの部分。32ビット環境ではintと同じで、64ビット環境ではlong longと同じ。
C++
#include <iostream>
#include <limits>
#include <cstdlib>  // fix: EXIT_SUCCESS was used without including <cstdlib>

using namespace std;

// Prints "<name>:max", wraps it with +1, then "<name>:min", wraps it
// with -1, using std::numeric_limits<T> — replaces eight copy-pasted
// blocks with one template.
template <typename T>
static void report(const char* name)
{
    T a = numeric_limits<T>::max();
    cout << name << ":max = " << a << endl;
    ++a;  // overflow: wraps for unsigned; UB but observed-wrapping for signed
    cout << " +1 = " << a << endl;
    a = numeric_limits<T>::min();
    cout << name << ":min = " << a << endl;
    --a;
    cout << " -1 = " << a << endl;
}

int main(int argc, char** argv)
{
    report<short>("short");
    report<unsigned short>("ushort");
    report<int>("int");
    report<unsigned int>("uint");
    report<long>("long");
    report<unsigned long>("ulong");
    report<long long>("long long");
    report<unsigned long long>("ulong long");
    return EXIT_SUCCESS;
}
===32bit=== ===64bit=== short:max = 32767 short:max = 32767 +1 = -32768 +1 = -32768 short:min = -32768 short:min = -32768 -1 = 32767 -1 = 32767 ushort:max = 65535 ushort:max = 65535 +1 = 0 +1 = 0 ushort:min = 0 ushort:min = 0 -1 = 65535 -1 = 65535 int:max = 2147483647 int:max = 2147483647 +1 = -2147483648 +1 = -2147483648 int:min = -2147483648 int:min = -2147483648 -1 = 2147483647 -1 = 2147483647 uint:max = 4294967295 uint:max = 4294967295 +1 = 0 +1 = 0 uint:min = 0 uint:min = 0 -1 = 4294967295 -1 = 4294967295 long:max = 2147483647 | long:max = 9223372036854775807 +1 = -2147483648 | +1 = -9223372036854775808 long:min = -2147483648 | long:min = -9223372036854775808 -1 = 2147483647 | -1 = 9223372036854775807 ulong:max = 4294967295 | ulong:max = 18446744073709551615 +1 = 0 +1 = 0 ulong:min = 0 ulong:min = 0 -1 = 4294967295 | -1 = 18446744073709551615 long long:max = 9223372036854775807 long long:max = 9223372036854775807 +1 = -9223372036854775808 +1 = -9223372036854775808 long long:min = -9223372036854775808 long long:min = -9223372036854775808 -1 = 9223372036854775807 -1 = 9223372036854775807 ulong long:max = 18446744073709551615 ulong long:max = 18446744073709551615 +1 = 0 +1 = 0 ulong long:min = 0 ulong long:min = 0 -1 = 18446744073709551615 -1 = 18446744073709551615
違いが出たのはlong/unsigned longの部分。32ビット環境ではintと同じで、64ビット環境ではlong longと同じ。
PHP
<?php
// PHP has no built-in int max/min constant here, so compute them:
// grow the value one bit at a time until shifting would overflow.
$n = 1;
while (($n << 1) + 1 > $n) {
    $n = ($n << 1) + 1;
}
echo "int:max = " . $n . PHP_EOL;
echo " = ";
var_dump($n);
++$n;  // past the max: var_dump shows the silent promotion to float
echo " +1 = " . $n . PHP_EOL;
echo " = ";
var_dump($n);
$n = -1;
while (($n << 1) < $n) {
    $n <<= 1;
}
echo "int:min = " . $n . PHP_EOL;
echo " = ";
var_dump($n);
--$n;  // past the min: also becomes a float
echo " -1 = " . $n . PHP_EOL;
echo " = ";
var_dump($n);
===32bit=== ===64bit=== int:max = 2147483647 | int:max = 9223372036854775807 = int(2147483647) | = int(9223372036854775807) +1 = 2147483648 | +1 = 9.2233720368548E+18 = float(2147483648) | = float(9.2233720368548E+18) int:min = -2147483648 | int:min = -9223372036854775808 = int(-2147483648) | = int(-9223372036854775808) -1 = -2147483649 | -1 = -9.2233720368548E+18 = float(-2147483649) | = float(-9.2233720368548E+18)
最大値、最小値を表す定数が無いので、計算によって求めている。
最初だまされたのは、32ビット環境で最大値に+1、最小値に-1したときに、一見するとint型に収まっているように見えたこと。 var_dump()すると、float型に変わっていることが分かる。
Python 2
a = 1 ct = 0 while ct < 128 and (a << 1) + 1 > a: a <<= 1 a += 1 ct += 1 print "long:max? = ",a print " = ",type(a) a += 1 print " +1 = ",a print " = ",type(a) a = -1 ct = 0 while ct < 128 and (a << 1) < a: a <<= 1 ct += 1 print "long:min? = ",a print " = ",type(a) a -= 1 print " -1 = ",a print " = ",type(a)
===32bit=== ===64bit=== long:max? = 680564733841876926926749214863536422911 long:max? = 680564733841876926926749214863536422911 = <type 'long'> = <type 'long'> +1 = 680564733841876926926749214863536422912 +1 = 680564733841876926926749214863536422912 = <type 'long'> = <type 'long'> long:min? = -340282366920938463463374607431768211456 long:min? = -340282366920938463463374607431768211456 = <type 'long'> = <type 'long'> -1 = -340282366920938463463374607431768211457 -1 = -340282366920938463463374607431768211457 = <type 'long'> = <type 'long'>
最大値や最小値という概念が無いことを知っていたので、128ビットまで計算したところで打ち切っている。 計算で出てきた数値に+1、-1してもまだ余地があることが分かる。
Python 3
# Python 3's int is arbitrary precision, so there is no real max/min;
# the probe is capped at 128 doublings so the loop terminates.
a = 1
ct = 0
while ct < 128 and (a << 1) + 1 > a:
    a <<= 1
    a += 1
    ct += 1
print("int:max? = ", a)  # fix: label typo "ing:max?" -> "int:max?"
print(" = ", type(a))
a += 1  # still an int — no overflow, the value just keeps growing
print(" +1 = ", a)
print(" = ", type(a))
a = -1
ct = 0
while ct < 128 and (a << 1) < a:
    a <<= 1
    ct += 1
print("int:min? = ", a)
print(" = ", type(a))
a -= 1
print(" -1 = ", a)
print(" = ", type(a))
===32bit=== ===64bit=== int:max? = 680564733841876926926749214863536422911 int:max? = 680564733841876926926749214863536422911 = <class 'int'> = <class 'int'> +1 = 680564733841876926926749214863536422912 +1 = 680564733841876926926749214863536422912 int:min? = -340282366920938463463374607431768211456 int:min? = -340282366920938463463374607431768211456 = <class 'int'> = <class 'int'> -1 = -340282366920938463463374607431768211457 -1 = -340282366920938463463374607431768211457 = <class 'int'> = <class 'int'>
こちらはPython 2の場合と同じ。
Ruby
# Ruby integers have no max/min (Fixnum auto-promotes to Bignum),
# so stop probing after 128 doublings, then show +1/-1 still work.
n = 1
128.times do
  break unless (n << 1) + 1 > n
  n = (n << 1) + 1
end
print "int:max? = ", n, "\n"
n += 1
print " +1 = ", n, "\n"
n = -1
128.times do
  break unless (n << 1) < n
  n <<= 1
end
print "int:min? = ", n, "\n"
n -= 1
print " -1 = ", n, "\n"
===32bit=== ===64bit=== int:max? = 680564733841876926926749214863536422911 int:max? = 680564733841876926926749214863536422911 +1 = 680564733841876926926749214863536422912 +1 = 680564733841876926926749214863536422912 int:min? = -340282366920938463463374607431768211456 int:min? = -340282366920938463463374607431768211456 -1 = -340282366920938463463374607431768211457 -1 = -340282366920938463463374607431768211457
Rubyも最大値や最小値が無いことを知っていたので、Pythonと同じく128ビットで打ち切っている。
Perl
# Reference: http://d.hatena.ne.jp/sardine/20131026
# Prints Perl's native integer limits: ~0 (all bits set) is the unsigned
# (UV) max, and -(~0 >> 1) - 1 is the signed (IV) min.  Going one past
# either limit promotes the value to a float (see the Devel::Peek dump
# later in the article).
my $a = ~0;
print "int:max = ", $a, "\n";
++$a;
print " +1 = ", $a, "\n";
# fix: this was a second "my $a", redeclaring (and masking) $a in the
# same scope — a warning under "use warnings"; plain reassignment suffices.
$a = -(~0 >> 1) - 1;
print "int:min = ", $a, "\n";
--$a;
print " -1 = ", $a, "\n";
===32bit=== ===64bit=== int:max = 4294967295 | int:max = 18446744073709551615 +1 = 4294967296 | +1 = 1.84467440737096e+19 int:min = -2147483648 | int:min = -9223372036854775808 -1 = -2147483649 | -1 = -9.22337203685478e+18
最初、ビット演算しても期待した結果が得られなくてはまっていた。ソースに書かれた参考サイトの情報が無ければ変な結果を得ていただろう。
32ビットの結果がちょっと変で、最大値/最小値を突き抜けて+1/-1できているように見える。これは何なんだろう・・・
(2017/12/08)様子が分かったので追記。
あるサイト(perl - check if a number is int or float - Stack Overflow)を参考に、変数のダンプ情報を出すようにしてみた。
use Devel::Peek;
# Reference: http://d.hatena.ne.jp/sardine/20131026
# Reference: https://stackoverflow.com/questions/4094036/check-if-a-number-is-int-or-float
# Same probe as before, but Dump() (to STDERR) shows the scalar's internal
# representation, making the IV/UV -> NV (float) promotion visible.
my $a = ~0;
print "int:max = ", $a, "\n";
Dump($a);
print STDERR "\n";
++$a;
print " +1 = ", $a, "\n";
Dump($a);
print STDERR "\n";
# fix: was a duplicate "my $a" in the same scope; reassign instead.
$a = -(~0 >> 1) - 1;
print "int:min = ", $a, "\n";
Dump($a);
print STDERR "\n";
--$a;
print " -1 = ", $a, "\n";
Dump($a);
すると、以下のような出力が得られる。
===32bit=== ===64bit=== int:max = 4294967295 | int:max = 18446744073709551615 SV = IV(0x8139d84) at 0x8139d88 | SV = IV(0x106a778) at 0x106a788 REFCNT = 1 REFCNT = 1 FLAGS = (PADMY,IOK,pIOK,IsUV) FLAGS = (PADMY,IOK,pIOK,IsUV) UV = 4294967295 | UV = 18446744073709551615 +1 = 4294967296 | +1 = 1.84467440737096e+19 SV = PVNV(0x811e9e0) at 0x8139d88 | SV = PVNV(0x104cfe0) at 0x106a788 REFCNT = 1 REFCNT = 1 FLAGS = (PADMY,NOK,POK,pNOK,pPOK) FLAGS = (PADMY,NOK,POK,pNOK,pPOK) IV = 0 IV = 0 NV = 4294967296 | NV = 1.84467440737096e+19 PV = 0x81417a0 "4294967296"\0 | PV = 0x106d530 "1.84467440737096e+19"\0 CUR = 10 | CUR = 20 LEN = 36 | LEN = 40 int:min = -2147483648 | int:min = -9223372036854775808 SV = IV(0x8139e14) at 0x8139e18 | SV = IV(0x106a940) at 0x106a950 REFCNT = 1 REFCNT = 1 FLAGS = (PADMY,IOK,pIOK) FLAGS = (PADMY,IOK,pIOK) IV = -2147483648 | IV = -9223372036854775808 -1 = -2147483649 | -1 = -9.22337203685478e+18 SV = PVNV(0x811e9f4) at 0x8139e18 | SV = PVNV(0x104d000) at 0x106a950 REFCNT = 1 REFCNT = 1 FLAGS = (PADMY,NOK,POK,pNOK,pPOK) FLAGS = (PADMY,NOK,POK,pNOK,pPOK) IV = -2147483648 | IV = -9223372036854775808 NV = -2147483649 | NV = -9.22337203685478e+18 PV = 0x8145810 "-2147483649"\0 | PV = 0x106d820 "-9.22337203685478e+18"\0 CUR = 11 | CUR = 21 LEN = 36 | LEN = 40
これを見ると、最大値/最小値に+1/-1した場合はfloatに自動変換されていることが分かる。これですっきり。
Go
// Probes Go's platform-sized int: grows the value one bit at a time
// until the growth test fails (overflow wraps), then shows the
// wrap-around on +1/-1.  The 128-iteration cap mirrors the other
// languages' probes.
package main

import (
	"fmt"
)

func main() {
	// fix: was "var a = 0; var ct = 0" immediately overwritten by
	// "a = 1; ct = 0" — initialize once, idiomatically.
	a, ct := 1, 0
	for ct < 128 && (a<<1)+1 > a {
		a <<= 1
		a += 1
		ct += 1
	}
	fmt.Printf("int:max = %d\n", a)
	a += 1
	fmt.Printf(" +1 = %d\n", a)
	a = -1
	ct = 0
	for ct < 128 && (a<<1) < a {
		a <<= 1
		ct += 1
	}
	fmt.Printf("int:min = %d\n", a)
	a -= 1
	fmt.Printf(" -1 = %d\n", a)
}
===32bit=== ===64bit=== int:max = 2147483647 | int:max = 9223372036854775807 +1 = -2147483648 | +1 = -9223372036854775808 int:min = -2147483648 | int:min = -9223372036854775808 -1 = 2147483647 | -1 = 9223372036854775807
Go言語は32ビット/64ビットの影響を受けるのだなとちょっと意外だった。
bash
#! /bin/bash
# Probes the shell's arithmetic integer range: grow one bit at a time
# until overflow stops the growth test, then show wrap-around on +1/-1.
a=1
while (( ((a << 1) + 1) > a )); do
  (( a = (a << 1) + 1 ))
done
echo "int:max = ${a}"
(( a = a + 1 ))
echo " +1 = ${a}"
a=-1
while (( (a << 1) < a )); do
  (( a <<= 1 ))
done
echo "int:min = ${a}"
(( a = a - 1 ))
echo " -1 = ${a}"
===32bit=== ===64bit=== int:max = 9223372036854775807 int:max = 9223372036854775807 +1 = -9223372036854775808 +1 = -9223372036854775808 int:min = -9223372036854775808 int:min = -9223372036854775808 -1 = 9223372036854775807 -1 = 9223372036854775807
逆に、シェルスクリプトは32ビット/64ビットの影響を受けると思っていたので意外だった。
まとめ
一覧表にしてみる。
言語 | 型 | 32bit max | 32bit min | 64bit max | 64bit min |
---|---|---|---|---|---|
Java | byte | 127 | -128 | 127 | -128 |
short | 32767 | -32768 | 32767 | -32768 | |
int | 2147483647 | -2147483648 | 2147483647 | -2147483648 | |
long | 9223372036854775807 | -9223372036854775808 | 9223372036854775807 | -9223372036854775808 | |
C | short | 32767 | -32768 | 32767 | -32768 |
unsigned short | 65535 | 0 | 65535 | 0 | |
int | 2147483647 | -2147483648 | 2147483647 | -2147483648 | |
unsigned int | 4294967295 | 0 | 4294967295 | 0 | |
long | 2147483647 | -2147483648 | 9223372036854775807 | -9223372036854775808 | |
unsigned long | 4294967295 | 0 | 18446744073709551615 | 0 | |
long long | 9223372036854775807 | -9223372036854775808 | 9223372036854775807 | -9223372036854775808 | |
unsigned long long | 18446744073709551615 | 0 | 18446744073709551615 | 0 | |
C++ | short | 32767 | -32768 | 32767 | -32768 |
unsigned short | 65535 | 0 | 65535 | 0 | |
int | 2147483647 | -2147483648 | 2147483647 | -2147483648 | |
unsigned int | 4294967295 | 0 | 4294967295 | 0 | |
long | 2147483647 | -2147483648 | 9223372036854775807 | -9223372036854775808 | |
unsigned long | 4294967295 | 0 | 18446744073709551615 | 0 | |
long long | 9223372036854775807 | -9223372036854775808 | 9223372036854775807 | -9223372036854775808 | |
unsigned long long | 18446744073709551615 | 0 | 18446744073709551615 | 0 | |
PHP | int | 2147483647 | -2147483648 | 9223372036854775807 | -9223372036854775808 |
Python 2 | long | ∞ | -∞ | ∞ | -∞ |
Python 3 | int | ∞ | -∞ | ∞ | -∞ |
Ruby | int | ∞ | -∞ | ∞ | -∞ |
Perl | int | 4294967295 | -2147483648 | 18446744073709551615 | -9223372036854775808 |
Go | int | 2147483647 | -2147483648 | 9223372036854775807 | -9223372036854775808 |
bash | int | 9223372036854775807 | -9223372036854775808 | 9223372036854775807 | -9223372036854775808 |