cleanup qemu/util code

Nguyen Anh Quynh
2017-01-10 12:57:12 +08:00
parent fdbbdc6216
commit c1f39c3db2
21 changed files with 1 addition and 1664 deletions


@@ -5,7 +5,6 @@ util-obj-y += module.o
util-obj-y += bitmap.o bitops.o
util-obj-y += error.o
util-obj-y += aes.o
util-obj-y += qemu-option.o
util-obj-y += crc32c.o
util-obj-y += host-utils.o
util-obj-y += getauxval.o


@@ -1057,596 +1057,3 @@ const uint32_t AES_Td4[256] = {
0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U,
0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU,
};
static const u32 rcon[] = {
0x01000000, 0x02000000, 0x04000000, 0x08000000,
0x10000000, 0x20000000, 0x40000000, 0x80000000,
0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
};
/**
* Expand the cipher key into the encryption key schedule.
*/
int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
AES_KEY *key) {
u32 *rk;
int i = 0;
u32 temp;
if (!userKey || !key)
return -1;
if (bits != 128 && bits != 192 && bits != 256)
return -2;
rk = key->rd_key;
if (bits==128)
key->rounds = 10;
else if (bits==192)
key->rounds = 12;
else
key->rounds = 14;
rk[0] = GETU32(userKey );
rk[1] = GETU32(userKey + 4);
rk[2] = GETU32(userKey + 8);
rk[3] = GETU32(userKey + 12);
if (bits == 128) {
while (1) {
temp = rk[3];
rk[4] = rk[0] ^
(AES_Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(AES_Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(AES_Te4[(temp ) & 0xff] & 0x0000ff00) ^
(AES_Te4[(temp >> 24) ] & 0x000000ff) ^
rcon[i];
rk[5] = rk[1] ^ rk[4];
rk[6] = rk[2] ^ rk[5];
rk[7] = rk[3] ^ rk[6];
if (++i == 10) {
return 0;
}
rk += 4;
}
}
rk[4] = GETU32(userKey + 16);
rk[5] = GETU32(userKey + 20);
if (bits == 192) {
while (1) {
temp = rk[ 5];
rk[ 6] = rk[ 0] ^
(AES_Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(AES_Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(AES_Te4[(temp ) & 0xff] & 0x0000ff00) ^
(AES_Te4[(temp >> 24) ] & 0x000000ff) ^
rcon[i];
rk[ 7] = rk[ 1] ^ rk[ 6];
rk[ 8] = rk[ 2] ^ rk[ 7];
rk[ 9] = rk[ 3] ^ rk[ 8];
if (++i == 8) {
return 0;
}
rk[10] = rk[ 4] ^ rk[ 9];
rk[11] = rk[ 5] ^ rk[10];
rk += 6;
}
}
rk[6] = GETU32(userKey + 24);
rk[7] = GETU32(userKey + 28);
if (bits == 256) {
while (1) {
temp = rk[ 7];
rk[ 8] = rk[ 0] ^
(AES_Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(AES_Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(AES_Te4[(temp ) & 0xff] & 0x0000ff00) ^
(AES_Te4[(temp >> 24) ] & 0x000000ff) ^
rcon[i];
rk[ 9] = rk[ 1] ^ rk[ 8];
rk[10] = rk[ 2] ^ rk[ 9];
rk[11] = rk[ 3] ^ rk[10];
if (++i == 7) {
return 0;
}
temp = rk[11];
rk[12] = rk[ 4] ^
(AES_Te4[(temp >> 24) ] & 0xff000000) ^
(AES_Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^
(AES_Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^
(AES_Te4[(temp ) & 0xff] & 0x000000ff);
rk[13] = rk[ 5] ^ rk[12];
rk[14] = rk[ 6] ^ rk[13];
rk[15] = rk[ 7] ^ rk[14];
rk += 8;
}
}
return 0;
}
/**
* Expand the cipher key into the decryption key schedule.
*/
int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
AES_KEY *key) {
u32 *rk;
int i, j, status;
u32 temp;
/* first, start with an encryption schedule */
status = AES_set_encrypt_key(userKey, bits, key);
if (status < 0)
return status;
rk = key->rd_key;
/* invert the order of the round keys: */
for (i = 0, j = 4*(key->rounds); i < j; i += 4, j -= 4) {
temp = rk[i ]; rk[i ] = rk[j ]; rk[j ] = temp;
temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp;
temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp;
temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp;
}
/* apply the inverse MixColumn transform to all round keys but the first and the last: */
for (i = 1; i < (key->rounds); i++) {
rk += 4;
rk[0] =
AES_Td0[AES_Te4[(rk[0] >> 24) ] & 0xff] ^
AES_Td1[AES_Te4[(rk[0] >> 16) & 0xff] & 0xff] ^
AES_Td2[AES_Te4[(rk[0] >> 8) & 0xff] & 0xff] ^
AES_Td3[AES_Te4[(rk[0] ) & 0xff] & 0xff];
rk[1] =
AES_Td0[AES_Te4[(rk[1] >> 24) ] & 0xff] ^
AES_Td1[AES_Te4[(rk[1] >> 16) & 0xff] & 0xff] ^
AES_Td2[AES_Te4[(rk[1] >> 8) & 0xff] & 0xff] ^
AES_Td3[AES_Te4[(rk[1] ) & 0xff] & 0xff];
rk[2] =
AES_Td0[AES_Te4[(rk[2] >> 24) ] & 0xff] ^
AES_Td1[AES_Te4[(rk[2] >> 16) & 0xff] & 0xff] ^
AES_Td2[AES_Te4[(rk[2] >> 8) & 0xff] & 0xff] ^
AES_Td3[AES_Te4[(rk[2] ) & 0xff] & 0xff];
rk[3] =
AES_Td0[AES_Te4[(rk[3] >> 24) ] & 0xff] ^
AES_Td1[AES_Te4[(rk[3] >> 16) & 0xff] & 0xff] ^
AES_Td2[AES_Te4[(rk[3] >> 8) & 0xff] & 0xff] ^
AES_Td3[AES_Te4[(rk[3] ) & 0xff] & 0xff];
}
return 0;
}
#ifndef AES_ASM
/*
* Encrypt a single block
* in and out can overlap
*/
void AES_encrypt(const unsigned char *in, unsigned char *out,
const AES_KEY *key) {
const u32 *rk;
u32 s0, s1, s2, s3, t0, t1, t2, t3;
#ifndef FULL_UNROLL
int r;
#endif /* ?FULL_UNROLL */
assert(in && out && key);
rk = key->rd_key;
/*
* map byte array block to cipher state
* and add initial round key:
*/
s0 = GETU32(in ) ^ rk[0];
s1 = GETU32(in + 4) ^ rk[1];
s2 = GETU32(in + 8) ^ rk[2];
s3 = GETU32(in + 12) ^ rk[3];
#ifdef FULL_UNROLL
/* round 1: */
t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[ 4];
t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[ 5];
t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[ 6];
t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[ 7];
/* round 2: */
s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[ 8];
s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[ 9];
s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[10];
s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[11];
/* round 3: */
t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[12];
t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[13];
t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[14];
t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[15];
/* round 4: */
s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[16];
s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[17];
s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[18];
s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[19];
/* round 5: */
t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[20];
t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[21];
t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[22];
t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[23];
/* round 6: */
s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[24];
s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[25];
s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[26];
s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[27];
/* round 7: */
t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[28];
t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[29];
t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[30];
t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[31];
/* round 8: */
s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[32];
s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[33];
s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[34];
s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[35];
/* round 9: */
t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[36];
t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[37];
t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[38];
t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[39];
if (key->rounds > 10) {
/* round 10: */
s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[40];
s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[41];
s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[42];
s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[43];
/* round 11: */
t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[44];
t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[45];
t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[46];
t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[47];
if (key->rounds > 12) {
/* round 12: */
s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[48];
s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[49];
s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[50];
s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[51];
/* round 13: */
t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[52];
t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[53];
t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[54];
t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[55];
}
}
rk += key->rounds << 2;
#else /* !FULL_UNROLL */
/*
* Nr - 1 full rounds:
*/
r = key->rounds >> 1;
for (;;) {
t0 =
AES_Te0[(s0 >> 24) ] ^
AES_Te1[(s1 >> 16) & 0xff] ^
AES_Te2[(s2 >> 8) & 0xff] ^
AES_Te3[(s3 ) & 0xff] ^
rk[4];
t1 =
AES_Te0[(s1 >> 24) ] ^
AES_Te1[(s2 >> 16) & 0xff] ^
AES_Te2[(s3 >> 8) & 0xff] ^
AES_Te3[(s0 ) & 0xff] ^
rk[5];
t2 =
AES_Te0[(s2 >> 24) ] ^
AES_Te1[(s3 >> 16) & 0xff] ^
AES_Te2[(s0 >> 8) & 0xff] ^
AES_Te3[(s1 ) & 0xff] ^
rk[6];
t3 =
AES_Te0[(s3 >> 24) ] ^
AES_Te1[(s0 >> 16) & 0xff] ^
AES_Te2[(s1 >> 8) & 0xff] ^
AES_Te3[(s2 ) & 0xff] ^
rk[7];
rk += 8;
if (--r == 0) {
break;
}
s0 =
AES_Te0[(t0 >> 24) ] ^
AES_Te1[(t1 >> 16) & 0xff] ^
AES_Te2[(t2 >> 8) & 0xff] ^
AES_Te3[(t3 ) & 0xff] ^
rk[0];
s1 =
AES_Te0[(t1 >> 24) ] ^
AES_Te1[(t2 >> 16) & 0xff] ^
AES_Te2[(t3 >> 8) & 0xff] ^
AES_Te3[(t0 ) & 0xff] ^
rk[1];
s2 =
AES_Te0[(t2 >> 24) ] ^
AES_Te1[(t3 >> 16) & 0xff] ^
AES_Te2[(t0 >> 8) & 0xff] ^
AES_Te3[(t1 ) & 0xff] ^
rk[2];
s3 =
AES_Te0[(t3 >> 24) ] ^
AES_Te1[(t0 >> 16) & 0xff] ^
AES_Te2[(t1 >> 8) & 0xff] ^
AES_Te3[(t2 ) & 0xff] ^
rk[3];
}
#endif /* ?FULL_UNROLL */
/*
* apply last round and
* map cipher state to byte array block:
*/
s0 =
(AES_Te4[(t0 >> 24) ] & 0xff000000) ^
(AES_Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(AES_Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(AES_Te4[(t3 ) & 0xff] & 0x000000ff) ^
rk[0];
PUTU32(out , s0);
s1 =
(AES_Te4[(t1 >> 24) ] & 0xff000000) ^
(AES_Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(AES_Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(AES_Te4[(t0 ) & 0xff] & 0x000000ff) ^
rk[1];
PUTU32(out + 4, s1);
s2 =
(AES_Te4[(t2 >> 24) ] & 0xff000000) ^
(AES_Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(AES_Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(AES_Te4[(t1 ) & 0xff] & 0x000000ff) ^
rk[2];
PUTU32(out + 8, s2);
s3 =
(AES_Te4[(t3 >> 24) ] & 0xff000000) ^
(AES_Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(AES_Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(AES_Te4[(t2 ) & 0xff] & 0x000000ff) ^
rk[3];
PUTU32(out + 12, s3);
}
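For reference, a minimal known-answer check of the API deleted here, using the FIPS-197 Appendix C.1 AES-128 test vector. It is a sketch only: it assumes compilation against this file and the usual qemu/aes.h declarations of AES_KEY, AES_set_encrypt_key() and AES_encrypt().
```c
#include <stdio.h>
#include <string.h>
#include "qemu/aes.h"   /* assumed header for AES_KEY and the prototypes */

int main(void)
{
    /* FIPS-197 Appendix C.1: AES-128 known-answer test. */
    static const unsigned char key[16] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
    };
    static const unsigned char pt[16] = {
        0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
        0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
    };
    static const unsigned char expect[16] = {
        0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
        0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a,
    };
    unsigned char ct[16];
    AES_KEY enc;

    AES_set_encrypt_key(key, 128, &enc);   /* 128-bit key -> 10 rounds */
    AES_encrypt(pt, ct, &enc);
    printf("FIPS-197 C.1: %s\n", memcmp(ct, expect, 16) ? "FAIL" : "OK");
    return 0;
}
```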
/*
* Decrypt a single block
* in and out can overlap
*/
void AES_decrypt(const unsigned char *in, unsigned char *out,
const AES_KEY *key) {
const u32 *rk;
u32 s0, s1, s2, s3, t0, t1, t2, t3;
#ifndef FULL_UNROLL
int r;
#endif /* ?FULL_UNROLL */
assert(in && out && key);
rk = key->rd_key;
/*
* map byte array block to cipher state
* and add initial round key:
*/
s0 = GETU32(in ) ^ rk[0];
s1 = GETU32(in + 4) ^ rk[1];
s2 = GETU32(in + 8) ^ rk[2];
s3 = GETU32(in + 12) ^ rk[3];
#ifdef FULL_UNROLL
/* round 1: */
t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[ 4];
t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[ 5];
t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[ 6];
t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[ 7];
/* round 2: */
s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[ 8];
s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[ 9];
s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[10];
s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[11];
/* round 3: */
t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[12];
t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[13];
t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[14];
t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[15];
/* round 4: */
s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[16];
s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[17];
s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[18];
s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[19];
/* round 5: */
t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[20];
t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[21];
t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[22];
t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[23];
/* round 6: */
s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[24];
s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[25];
s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[26];
s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[27];
/* round 7: */
t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[28];
t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[29];
t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[30];
t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[31];
/* round 8: */
s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[32];
s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[33];
s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[34];
s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[35];
/* round 9: */
t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[36];
t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[37];
t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[38];
t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[39];
if (key->rounds > 10) {
/* round 10: */
s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[40];
s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[41];
s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[42];
s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[43];
/* round 11: */
t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[44];
t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[45];
t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[46];
t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[47];
if (key->rounds > 12) {
/* round 12: */
s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[48];
s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[49];
s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[50];
s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[51];
/* round 13: */
t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[52];
t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[53];
t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[54];
t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[55];
}
}
rk += key->rounds << 2;
#else /* !FULL_UNROLL */
/*
* Nr - 1 full rounds:
*/
r = key->rounds >> 1;
for (;;) {
t0 =
AES_Td0[(s0 >> 24) ] ^
AES_Td1[(s3 >> 16) & 0xff] ^
AES_Td2[(s2 >> 8) & 0xff] ^
AES_Td3[(s1 ) & 0xff] ^
rk[4];
t1 =
AES_Td0[(s1 >> 24) ] ^
AES_Td1[(s0 >> 16) & 0xff] ^
AES_Td2[(s3 >> 8) & 0xff] ^
AES_Td3[(s2 ) & 0xff] ^
rk[5];
t2 =
AES_Td0[(s2 >> 24) ] ^
AES_Td1[(s1 >> 16) & 0xff] ^
AES_Td2[(s0 >> 8) & 0xff] ^
AES_Td3[(s3 ) & 0xff] ^
rk[6];
t3 =
AES_Td0[(s3 >> 24) ] ^
AES_Td1[(s2 >> 16) & 0xff] ^
AES_Td2[(s1 >> 8) & 0xff] ^
AES_Td3[(s0 ) & 0xff] ^
rk[7];
rk += 8;
if (--r == 0) {
break;
}
s0 =
AES_Td0[(t0 >> 24) ] ^
AES_Td1[(t3 >> 16) & 0xff] ^
AES_Td2[(t2 >> 8) & 0xff] ^
AES_Td3[(t1 ) & 0xff] ^
rk[0];
s1 =
AES_Td0[(t1 >> 24) ] ^
AES_Td1[(t0 >> 16) & 0xff] ^
AES_Td2[(t3 >> 8) & 0xff] ^
AES_Td3[(t2 ) & 0xff] ^
rk[1];
s2 =
AES_Td0[(t2 >> 24) ] ^
AES_Td1[(t1 >> 16) & 0xff] ^
AES_Td2[(t0 >> 8) & 0xff] ^
AES_Td3[(t3 ) & 0xff] ^
rk[2];
s3 =
AES_Td0[(t3 >> 24) ] ^
AES_Td1[(t2 >> 16) & 0xff] ^
AES_Td2[(t1 >> 8) & 0xff] ^
AES_Td3[(t0 ) & 0xff] ^
rk[3];
}
#endif /* ?FULL_UNROLL */
/*
* apply last round and
* map cipher state to byte array block:
*/
s0 =
(AES_Td4[(t0 >> 24) ] & 0xff000000) ^
(AES_Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
(AES_Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
(AES_Td4[(t1 ) & 0xff] & 0x000000ff) ^
rk[0];
PUTU32(out , s0);
s1 =
(AES_Td4[(t1 >> 24) ] & 0xff000000) ^
(AES_Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
(AES_Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
(AES_Td4[(t2 ) & 0xff] & 0x000000ff) ^
rk[1];
PUTU32(out + 4, s1);
s2 =
(AES_Td4[(t2 >> 24) ] & 0xff000000) ^
(AES_Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(AES_Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
(AES_Td4[(t3 ) & 0xff] & 0x000000ff) ^
rk[2];
PUTU32(out + 8, s2);
s3 =
(AES_Td4[(t3 >> 24) ] & 0xff000000) ^
(AES_Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
(AES_Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
(AES_Td4[(t0 ) & 0xff] & 0x000000ff) ^
rk[3];
PUTU32(out + 12, s3);
}
#endif /* AES_ASM */
void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
const unsigned long length, const AES_KEY *key,
unsigned char *ivec, const int enc)
{
unsigned long n;
unsigned long len = length;
unsigned char tmp[AES_BLOCK_SIZE];
assert(in && out && key && ivec);
if (enc) {
while (len >= AES_BLOCK_SIZE) {
for(n=0; n < AES_BLOCK_SIZE; ++n)
tmp[n] = in[n] ^ ivec[n];
AES_encrypt(tmp, out, key);
memcpy(ivec, out, AES_BLOCK_SIZE);
len -= AES_BLOCK_SIZE;
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
}
if (len) {
for(n=0; n < len; ++n)
tmp[n] = in[n] ^ ivec[n];
for(n=len; n < AES_BLOCK_SIZE; ++n)
tmp[n] = ivec[n];
AES_encrypt(tmp, tmp, key);
memcpy(out, tmp, AES_BLOCK_SIZE);
memcpy(ivec, tmp, AES_BLOCK_SIZE);
}
} else {
while (len >= AES_BLOCK_SIZE) {
memcpy(tmp, in, AES_BLOCK_SIZE);
AES_decrypt(in, out, key);
for(n=0; n < AES_BLOCK_SIZE; ++n)
out[n] ^= ivec[n];
memcpy(ivec, tmp, AES_BLOCK_SIZE);
len -= AES_BLOCK_SIZE;
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
}
if (len) {
memcpy(tmp, in, AES_BLOCK_SIZE);
AES_decrypt(tmp, tmp, key);
for(n=0; n < len; ++n)
out[n] = tmp[n] ^ ivec[n];
memcpy(ivec, tmp, AES_BLOCK_SIZE);
}
}
}
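And a hedged sketch of AES_cbc_encrypt() in both directions. Two things are easy to miss: the IV buffer is updated in place (so the decrypt pass needs its own copy), and decryption requires the separate AES_set_decrypt_key() schedule. Lengths that are not a multiple of AES_BLOCK_SIZE get the special partial-block handling above, so the sketch sticks to whole blocks; it again assumes qemu/aes.h.
```c
#include <stdio.h>
#include <string.h>
#include "qemu/aes.h"   /* assumed header for the prototypes */

int main(void)
{
    unsigned char key[16], iv[16], iv2[16];
    unsigned char pt[32], ct[32], back[32];
    AES_KEY enc, dec;
    unsigned long i;

    for (i = 0; i < 16; i++) {
        key[i] = (unsigned char)i;
    }
    memset(iv, 0xA5, sizeof(iv));
    memcpy(iv2, iv, sizeof(iv2));        /* AES_cbc_encrypt mutates ivec */
    for (i = 0; i < sizeof(pt); i++) {
        pt[i] = (unsigned char)(i * 7);
    }

    AES_set_encrypt_key(key, 128, &enc);
    AES_set_decrypt_key(key, 128, &dec); /* decryption needs its own schedule */

    AES_cbc_encrypt(pt, ct, sizeof(pt), &enc, iv, 1);    /* enc != 0: encrypt */
    AES_cbc_encrypt(ct, back, sizeof(ct), &dec, iv2, 0); /* enc == 0: decrypt */

    printf("CBC round-trip: %s\n",
           memcmp(pt, back, sizeof(pt)) ? "FAIL" : "OK");
    return 0;
}
```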


@@ -12,149 +12,6 @@
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
/*
* bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
*
* The possible unused bits in the last, partially used word
* of a bitmap are 'don't care'. The implementation makes
* no particular effort to keep them zero. It ensures that
* their value will not affect the results of any operation.
* The bitmap operations that return Boolean (bitmap_empty,
* for example) or scalar (bitmap_weight, for example) results
* carefully filter out these unused bits from impacting their
* results.
*
* These operations actually hold to a slightly stronger rule:
* if you don't input any bitmaps to these ops that have some
* unused bits set, then they won't output any set unused bits
* in output bitmaps.
*
* The byte ordering of bitmaps is more natural on little
* endian architectures.
*/
int slow_bitmap_empty(const unsigned long *bitmap, long bits)
{
long k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k) {
if (bitmap[k]) {
return 0;
}
}
if (bits % BITS_PER_LONG) {
if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) {
return 0;
}
}
return 1;
}
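A self-contained illustration of the last-word masking rule the functions above rely on, with a simplified stand-in for BITMAP_LAST_WORD_MASK(); it assumes an LP64 target where unsigned long has 64 bits.
```c
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG ((int)(CHAR_BIT * sizeof(unsigned long)))

/* Simplified stand-in for BITMAP_LAST_WORD_MASK(): low 'rem' bits valid. */
static unsigned long last_word_mask(long nbits)
{
    long rem = nbits % BITS_PER_LONG;
    return rem ? (1UL << rem) - 1 : ~0UL;
}

int main(void)
{
    unsigned long map[2] = { 0, 0 };
    long bits = 70;              /* 64 valid bits in map[0], 6 in map[1] */

    map[1] |= 1UL << 40;         /* bit 104: one of the "don't care" bits */
    /* Whole words are compared directly; the partial word is masked, so
     * the stray bit does not stop the bitmap from counting as empty. */
    printf("empty: %d\n",
           map[0] == 0 && (map[1] & last_word_mask(bits)) == 0);
    return 0;
}
```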
int slow_bitmap_full(const unsigned long *bitmap, long bits)
{
long k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k) {
if (~bitmap[k]) {
return 0;
}
}
if (bits % BITS_PER_LONG) {
if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) {
return 0;
}
}
return 1;
}
int slow_bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, long bits)
{
long k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k) {
if (bitmap1[k] != bitmap2[k]) {
return 0;
}
}
if (bits % BITS_PER_LONG) {
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) {
return 0;
}
}
return 1;
}
void slow_bitmap_complement(unsigned long *dst, const unsigned long *src,
long bits)
{
long k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k) {
dst[k] = ~src[k];
}
if (bits % BITS_PER_LONG) {
dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
}
}
int slow_bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, long bits)
{
long k;
long nr = BITS_TO_LONGS(bits);
unsigned long result = 0;
for (k = 0; k < nr; k++) {
result |= (dst[k] = bitmap1[k] & bitmap2[k]);
}
return result != 0;
}
void slow_bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, long bits)
{
long k;
long nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++) {
dst[k] = bitmap1[k] | bitmap2[k];
}
}
void slow_bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, long bits)
{
long k;
long nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++) {
dst[k] = bitmap1[k] ^ bitmap2[k];
}
}
int slow_bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, long bits)
{
long k;
long nr = BITS_TO_LONGS(bits);
unsigned long result = 0;
for (k = 0; k < nr; k++) {
result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
}
return result != 0;
}
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
void bitmap_set(unsigned long *map, long start, long nr)
@@ -196,61 +53,3 @@ void bitmap_clear(unsigned long *map, long start, long nr)
*p &= ~mask_to_clear;
}
}
#define ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
/**
* bitmap_find_next_zero_area - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
*
* The @align_mask should be one less than a power of 2; the effect is that
* the bit offset of every zero area this function finds is a multiple of that
* power of 2. An @align_mask of 0 means no alignment is required.
*/
unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned long nr,
unsigned long align_mask)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
index = ALIGN_MASK(index, align_mask);
end = index + nr;
if (end > size) {
return end;
}
i = find_next_bit(map, end, index);
if (i < end) {
start = i + 1;
goto again;
}
return index;
}
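A hedged, standalone sketch of the search-and-realign loop above: the real find_next_bit()/find_next_zero_bit() live in util/bitops.c, so naive loops stand in for them here to keep the example compilable on its own.
```c
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG ((int)(CHAR_BIT * sizeof(unsigned long)))
#define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

static int test_bit(const unsigned long *map, unsigned long i)
{
    return (map[i / BITS_PER_LONG] >> (i % BITS_PER_LONG)) & 1UL;
}

/* Naive stand-ins for find_next_zero_bit()/find_next_bit(). */
static unsigned long next_zero(const unsigned long *map,
                               unsigned long size, unsigned long start)
{
    while (start < size && test_bit(map, start)) start++;
    return start;
}

static unsigned long next_one(const unsigned long *map,
                              unsigned long size, unsigned long start)
{
    while (start < size && !test_bit(map, start)) start++;
    return start;
}

/* Same shape as bitmap_find_next_zero_area() above. */
static unsigned long find_zero_area(unsigned long *map, unsigned long size,
                                    unsigned long start, unsigned long nr,
                                    unsigned long align_mask)
{
    unsigned long index, end, i;
again:
    index = ALIGN_MASK(next_zero(map, size, start), align_mask);
    end = index + nr;
    if (end > size) return end;           /* > size signals "not found" */
    i = next_one(map, end, index);
    if (i < end) { start = i + 1; goto again; }
    return index;
}

int main(void)
{
    unsigned long map[1] = { 0xF3FUL };   /* bits 0-5 and 8-11 set */
    /* First zero is bit 6; aligned up to 8, which collides with bit 8,
     * so the search restarts and lands on the aligned area at bit 12. */
    printf("area at %lu\n", find_zero_area(map, 64, 0, 4, 3));  /* -> 12 */
    return 0;
}
```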
int slow_bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, long bits)
{
long k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k) {
if (bitmap1[k] & bitmap2[k]) {
return 1;
}
}
if (bits % BITS_PER_LONG) {
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) {
return 1;
}
}
return 0;
}


@@ -127,32 +127,3 @@ found_first:
found_middle:
return result + ctzl(~tmp);
}
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
unsigned long words;
unsigned long tmp;
/* Start at final word. */
words = size / BITS_PER_LONG;
/* Partial final word? */
if (size & (BITS_PER_LONG-1)) {
tmp = (addr[words] & (~0UL >> (BITS_PER_LONG
- (size & (BITS_PER_LONG-1)))));
if (tmp) {
goto found;
}
}
while (words) {
tmp = addr[--words];
if (tmp) {
found:
return words * BITS_PER_LONG + BITS_PER_LONG - 1 - clzl(tmp);
}
}
/* Not found */
return size;
}
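A standalone check of find_last_bit()'s contract: it returns the index of the highest set bit below size, or size itself when none is set. The partial last word is masked first, then whole words are scanned downward. In this sketch __builtin_clzl (GCC/Clang) stands in for clzl(), and a 64-bit unsigned long is assumed.
```c
#include <stdio.h>

#define BITS_PER_LONG 64   /* assumption: LP64 target */

static unsigned long last_bit(const unsigned long *addr, unsigned long size)
{
    unsigned long words = size / BITS_PER_LONG, tmp;

    if (size % BITS_PER_LONG) {           /* mask off the invalid top bits */
        tmp = addr[words] & (~0UL >> (BITS_PER_LONG - size % BITS_PER_LONG));
        if (tmp) goto found;
    }
    while (words) {
        tmp = addr[--words];
        if (tmp) {
found:
            return words * BITS_PER_LONG + BITS_PER_LONG - 1
                   - (unsigned long)__builtin_clzl(tmp);
        }
    }
    return size;                          /* no bit set */
}

int main(void)
{
    unsigned long map[2] = { 1UL << 3, 1UL << 5 };  /* bits 3 and 69 set */

    printf("%lu\n", last_bit(map, 70));     /* 69: bit 69 is in range   */
    printf("%lu\n", last_bit(map, 68));     /* 3:  bit 69 is masked off */
    printf("%lu\n", last_bit(map + 1, 5));  /* 5:  nothing set below 5  */
    return 0;
}
```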


@@ -28,13 +28,6 @@
#include <errno.h>
void strpadcpy(char *buf, int buf_size, const char *str, char pad)
{
int len = qemu_strnlen(str, buf_size);
memcpy(buf, str, len);
memset(buf + len, pad, buf_size - len);
}
void pstrcpy(char *buf, int buf_size, const char *str)
{
int c;
@@ -78,108 +71,11 @@ int strstart(const char *str, const char *val, const char **ptr)
return 1;
}
int stristart(const char *str, const char *val, const char **ptr)
{
const char *p, *q;
p = str;
q = val;
while (*q != '\0') {
if (qemu_toupper(*p) != qemu_toupper(*q))
return 0;
p++;
q++;
}
if (ptr)
*ptr = p;
return 1;
}
/* XXX: use host strnlen if available ? */
int qemu_strnlen(const char *s, int max_len)
{
int i;
for(i = 0; i < max_len; i++) {
if (s[i] == '\0') {
break;
}
}
return i;
}
char *qemu_strsep(char **input, const char *delim)
{
char *result = *input;
if (result != NULL) {
char *p;
for (p = result; *p != '\0'; p++) {
if (strchr(delim, *p)) {
break;
}
}
if (*p == '\0') {
*input = NULL;
} else {
*p = '\0';
*input = p + 1;
}
}
return result;
}
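qemu_strsep() behaves like BSD strsep(): it tokenizes in place, NUL-terminating each token and advancing *input (which becomes NULL at the end of the string). A small usage sketch, assuming it links against this file:
```c
#include <stdio.h>

char *qemu_strsep(char **input, const char *delim);  /* defined above */

int main(void)
{
    char buf[] = "eth0,virtio,id=net0";
    char *p = buf, *tok;

    while ((tok = qemu_strsep(&p, ",")) != NULL) {
        printf("token: %s\n", tok);   /* eth0, then virtio, then id=net0 */
    }
    return 0;
}
```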
time_t mktimegm(struct tm *tm)
{
time_t t;
int y = tm->tm_year + 1900, m = tm->tm_mon + 1, d = tm->tm_mday;
if (m < 3) {
m += 12;
y--;
}
t = 86400ULL * (d + (153 * m - 457) / 5 + 365 * y + y / 4 - y / 100 +
y / 400 - 719469);
t += 3600 * tm->tm_hour + 60 * tm->tm_min + tm->tm_sec;
return t;
}
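The constants in mktimegm() pack the whole Gregorian calendar into one expression: (153*m - 457)/5 is the shifted-month day-of-year formula (January and February count as months 13 and 14 of the previous year, hence the m < 3 adjustment), y/4 - y/100 + y/400 folds in the leap-year rules, and 719469 rebases day 0 to 1970-01-01. A standalone re-derivation (hypothetical helper name) checked against two known epochs:
```c
#include <stdio.h>

/* Same arithmetic as mktimegm() above, dates only; m is 1..12. */
static long long epoch_seconds(int y, int m, int d)
{
    if (m < 3) {        /* Jan/Feb -> months 13/14 of the previous year */
        m += 12;
        y--;
    }
    return 86400LL * (d + (153 * m - 457) / 5 + 365LL * y + y / 4
                      - y / 100 + y / 400 - 719469);
}

int main(void)
{
    printf("%lld\n", epoch_seconds(1970, 1, 1));   /* 0 */
    printf("%lld\n", epoch_seconds(2000, 3, 1));   /* 951868800 */
    return 0;
}
```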
int qemu_fls(int i)
{
return 32 - clz32(i);
}
/*
* Make sure data goes on disk, but if possible do not bother to
* write out the inode just for timestamp updates.
*
* Unfortunately even in 2009 many operating systems do not support
* fdatasync and have to fall back to fsync.
*/
int qemu_fdatasync(int fd)
{
#ifdef CONFIG_FDATASYNC
return fdatasync(fd);
#else
return fsync(fd);
#endif
}
#ifndef _WIN32
/* Sets a specific flag */
int fcntl_setfl(int fd, int flag)
{
int flags;
flags = fcntl(fd, F_GETFL);
if (flags == -1)
return -errno;
if (fcntl(fd, F_SETFL, flags | flag) == -1)
return -errno;
return 0;
}
#endif
static int64_t suffix_mul(char suffix, int64_t unit)
{
switch (qemu_toupper(suffix)) {
@@ -259,161 +155,3 @@ int64_t strtosz(const char *nptr, char **end)
{
return strtosz_suffix(nptr, end, STRTOSZ_DEFSUFFIX_MB);
}
/**
* parse_uint:
*
* @s: String to parse
* @value: Destination for parsed integer value
* @endptr: Destination for pointer to first character not consumed
* @base: integer base, between 2 and 36 inclusive, or 0
*
* Parse unsigned integer
*
* Parsed syntax is like strtoull()'s: arbitrary whitespace, a single optional
* '+' or '-', an optional "0x" if @base is 0 or 16, one or more digits.
*
* If @s is null, or @base is invalid, or @s doesn't start with an
* integer in the syntax above, set *@value to 0, *@endptr to @s, and
* return -EINVAL.
*
* Set *@endptr to point right beyond the parsed integer (even if the integer
* overflows or is negative, all digits will be parsed and *@endptr will
* point right beyond them).
*
* If the integer is negative, set *@value to 0, and return -ERANGE.
*
* If the integer overflows unsigned long long, set *@value to
* ULLONG_MAX, and return -ERANGE.
*
* Else, set *@value to the parsed integer, and return 0.
*/
int parse_uint(const char *s, unsigned long long *value, char **endptr,
int base)
{
int r = 0;
char *endp = (char *)s;
unsigned long long val = 0;
if (!s) {
r = -EINVAL;
goto out;
}
errno = 0;
val = strtoull(s, &endp, base);
if (errno) {
r = -errno;
goto out;
}
if (endp == s) {
r = -EINVAL;
goto out;
}
/* make sure we reject negative numbers: */
while (isspace((unsigned char)*s)) {
s++;
}
if (*s == '-') {
val = 0;
r = -ERANGE;
goto out;
}
out:
*value = val;
*endptr = endp;
return r;
}
/**
* parse_uint_full:
*
* @s: String to parse
* @value: Destination for parsed integer value
* @base: integer base, between 2 and 36 inclusive, or 0
*
* Parse unsigned integer from entire string
*
* Behaves like parse_uint(), but additionally checks that no extra
* characters follow the parsed number. If extra characters are present
* after the parsed number, the function returns -EINVAL, and *@value
* will be set to 0.
*/
int parse_uint_full(const char *s, unsigned long long *value, int base)
{
char *endp;
int r;
r = parse_uint(s, value, &endp, base);
if (r < 0) {
return r;
}
if (*endp) {
*value = 0;
return -EINVAL;
}
return 0;
}
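A usage sketch showing the endptr contract and the explicit rejection of negative input (strtoull() by itself would silently wrap "-5" to a huge value); it assumes linking against this file.
```c
#include <stdio.h>

/* Prototypes for the functions defined in this file. */
int parse_uint(const char *s, unsigned long long *value, char **endptr,
               int base);
int parse_uint_full(const char *s, unsigned long long *value, int base);

int main(void)
{
    unsigned long long v;
    char *end;

    parse_uint("42abc", &v, &end, 10);        /* returns 0, v = 42 */
    printf("v=%llu rest=\"%s\"\n", v, end);   /* rest = "abc" */

    printf("%d\n", parse_uint("-5", &v, &end, 10));   /* -ERANGE, v = 0  */
    printf("%d\n", parse_uint_full("42abc", &v, 10)); /* -EINVAL: junk   */
    printf("%d\n", parse_uint_full("0x2a", &v, 0));   /* 0, v = 42       */
    return 0;
}
```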
int qemu_parse_fd(const char *param)
{
long fd;
char *endptr;
errno = 0;
fd = strtol(param, &endptr, 10);
if (param == endptr /* no conversion performed */ ||
errno != 0 /* not representable as long; possibly others */ ||
*endptr != '\0' /* final string not empty */ ||
fd < 0 /* invalid as file descriptor */ ||
fd > INT_MAX /* not representable as int */) {
return -1;
}
return fd;
}
/* round down to the nearest power of 2 */
int64_t pow2floor(int64_t value)
{
if (!is_power_of_2(value)) {
value = 0x8000000000000000ULL >> clz64(value);
}
return value;
}
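The branch works because clz64(value) counts leading zeros down to the highest set bit, so shifting the top bit of a 64-bit word right by that count isolates it. A quick standalone check, with __builtin_clzll standing in for clz64() (the value must be positive, since clz of 0 is undefined):
```c
#include <stdio.h>
#include <stdint.h>

static int64_t p2floor(int64_t v)   /* v > 0 assumed */
{
    return (v & (v - 1)) == 0 ? v   /* already a power of 2 */
         : (int64_t)(0x8000000000000000ULL >> __builtin_clzll((uint64_t)v));
}

int main(void)
{
    printf("%lld %lld %lld\n",
           (long long)p2floor(1), (long long)p2floor(1000),
           (long long)p2floor(4096));   /* 1 512 4096 */
    return 0;
}
```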
/*
* Implementation of ULEB128 (http://en.wikipedia.org/wiki/LEB128)
* Input is limited to 14-bit numbers
*/
int uleb128_encode_small(uint8_t *out, uint32_t n)
{
g_assert(n <= 0x3fff);
if (n < 0x80) {
*out++ = n;
return 1;
} else {
*out++ = (n & 0x7f) | 0x80;
*out++ = n >> 7;
return 2;
}
}
int uleb128_decode_small(const uint8_t *in, uint32_t *n)
{
if (!(*in & 0x80)) {
*n = *in++;
return 1;
} else {
*n = *in++ & 0x7f;
/* the value exceeds the 14-bit limit */
if (*in & 0x80) {
return -1;
}
*n |= *in++ << 7;
return 2;
}
}
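A round-trip sketch for the two helpers above; it assumes they are compiled into the same program (and glib, for the g_assert() in the encoder).
```c
#include <stdio.h>
#include <stdint.h>

/* Prototypes for the helpers defined above. */
int uleb128_encode_small(uint8_t *out, uint32_t n);
int uleb128_decode_small(const uint8_t *in, uint32_t *n);

int main(void)
{
    uint32_t samples[] = { 0, 0x7f, 0x80, 0x1234, 0x3fff };
    uint8_t buf[2];
    uint32_t back;
    size_t i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        int elen = uleb128_encode_small(buf, samples[i]); /* 1 or 2 bytes */
        int dlen = uleb128_decode_small(buf, &back);
        printf("0x%04x -> %d byte(s) -> 0x%04x %s\n",
               (unsigned)samples[i], elen, (unsigned)back,
               (dlen == elen && back == samples[i]) ? "OK" : "FAIL");
    }
    return 0;
}
```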


@@ -1,70 +0,0 @@
/*
* Commandline option parsing functions
*
* Copyright (c) 2003-2008 Fabrice Bellard
* Copyright (c) 2009 Kevin Wolf <kwolf@redhat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <stdio.h>
#include <string.h>
#include "qemu-common.h"
#include "qapi/qmp/qerror.h"
#include "qemu/option.h"
void parse_option_size(const char *name, const char *value,
uint64_t *ret, Error **errp)
{
char *postfix;
double sizef;
if (value != NULL) {
sizef = strtod(value, &postfix);
switch (*postfix) {
case 'T':
sizef *= 1024;
/* fall through */
case 'G':
sizef *= 1024;
/* fall through */
case 'M':
sizef *= 1024;
/* fall through */
case 'K':
case 'k':
sizef *= 1024;
/* fall through */
case 'b':
case '\0':
*ret = (uint64_t) sizef;
break;
default:
error_set(errp, QERR_INVALID_PARAMETER_VALUE, name, "a size");
#if 0 /* conversion from qerror_report() to error_set() broke this: */
error_printf_unless_qmp("You may use k, M, G or T suffixes for "
"kilobytes, megabytes, gigabytes and terabytes.\n");
#endif
return;
}
} else {
error_set(errp, QERR_INVALID_PARAMETER_VALUE, name, "a size");
}
}
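The switch in parse_option_size() deliberately falls through: each recognized suffix multiplies by 1024 and drops into the next smaller unit, so 'T' accumulates four factors and 'K' one. A self-contained sketch of just that logic, stripped of the Error plumbing (parse_size is a hypothetical name):
```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int parse_size(const char *value, uint64_t *ret)
{
    char *postfix;
    double sizef = strtod(value, &postfix);

    switch (*postfix) {
    case 'T': sizef *= 1024;            /* fall through */
    case 'G': sizef *= 1024;            /* fall through */
    case 'M': sizef *= 1024;            /* fall through */
    case 'K': case 'k': sizef *= 1024;  /* fall through */
    case 'b': case '\0':
        *ret = (uint64_t)sizef;
        return 0;
    default:
        return -1;                      /* unknown suffix */
    }
}

int main(void)
{
    uint64_t v;

    parse_size("1.5G", &v);
    printf("%llu\n", (unsigned long long)v);   /* 1610612736 */
    return 0;
}
```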