Commit 7cf4206a authored by Dmitry Kasatkin, committed by James Morris

Remove unused code from MPI library

The MPI library is used by the RSA verification implementation. A few
files contain functions that are never called.

James Morris asked to remove all of them.
Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@intel.com>
Requested-by: James Morris <james.l.morris@oracle.com>
Signed-off-by: James Morris <james.l.morris@oracle.com>
parent 9e235dca
@@ -54,165 +54,3 @@ unsigned mpi_get_nbits(MPI a)
	return n;
}
EXPORT_SYMBOL_GPL(mpi_get_nbits);
/****************
* Test whether bit N is set.
*/
int mpi_test_bit(MPI a, unsigned n)
{
unsigned limbno, bitno;
mpi_limb_t limb;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs)
return 0; /* too far left: this is a 0 */
limb = a->d[limbno];
return (limb & (A_LIMB_1 << bitno)) ? 1 : 0;
}
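For reference, the limb/bit decomposition used by mpi_test_bit can be exercised outside the MPI types. A minimal user-space sketch, assuming hypothetical 32-bit limbs rather than the kernel's configuration-dependent mpi_limb_t:

#include <stdio.h>
#include <stdint.h>

#define LIMB_BITS 32

/* Mirror of mpi_test_bit: bit n of a little-endian limb array; bits
 * beyond the allocated limbs read as 0, as in the original. */
static int test_bit(const uint32_t *d, unsigned int nlimbs, unsigned int n)
{
	unsigned int limbno = n / LIMB_BITS;
	unsigned int bitno = n % LIMB_BITS;

	if (limbno >= nlimbs)
		return 0;	/* too far left: this is a 0 */
	return (d[limbno] >> bitno) & 1;
}

int main(void)
{
	uint32_t d[2] = { 0x00000001, 0x80000000 };	/* value 2^63 + 1 */

	/* prints 1 1 0: bit 0 and bit 63 are set, bit 64 is out of range */
	printf("%d %d %d\n", test_bit(d, 2, 0), test_bit(d, 2, 63),
	       test_bit(d, 2, 64));
	return 0;
}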
/****************
* Set bit N of A.
*/
int mpi_set_bit(MPI a, unsigned n)
{
unsigned limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs) { /* resize */
if (a->alloced >= limbno)
if (mpi_resize(a, limbno + 1) < 0)
return -ENOMEM;
a->nlimbs = limbno + 1;
}
a->d[limbno] |= (A_LIMB_1 << bitno);
return 0;
}
/****************
* Set bit N of A and clear all bits above.
*/
int mpi_set_highbit(MPI a, unsigned n)
{
unsigned limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs) { /* resize */
if (a->alloced >= limbno)
if (mpi_resize(a, limbno + 1) < 0)
return -ENOMEM;
a->nlimbs = limbno + 1;
}
a->d[limbno] |= (A_LIMB_1 << bitno);
for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
a->d[limbno] &= ~(A_LIMB_1 << bitno);
a->nlimbs = limbno + 1;
return 0;
}
/****************
* clear bit N of A and all bits above
*/
void mpi_clear_highbit(MPI a, unsigned n)
{
unsigned limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs)
return; /* not allocated, therefore no need to clear bits :-) */
for (; bitno < BITS_PER_MPI_LIMB; bitno++)
a->d[limbno] &= ~(A_LIMB_1 << bitno);
a->nlimbs = limbno + 1;
}
/****************
* Clear bit N of A.
*/
void mpi_clear_bit(MPI a, unsigned n)
{
unsigned limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs)
return; /* don't need to clear this bit, it's too far to the left */
a->d[limbno] &= ~(A_LIMB_1 << bitno);
}
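The bit-by-bit loops in mpi_set_highbit and mpi_clear_highbit that zero everything above position bitno can equivalently be written as a single mask. A sketch under the same hypothetical 32-bit-limb assumption:

#include <stdint.h>

/* Keep bits 0..bitno of a limb and zero everything above, matching the
 * clearing loops above; bitno == 31 is special-cased so the shift
 * amount never reaches 32 (undefined for a 32-bit type). */
static uint32_t clear_above(uint32_t limb, unsigned int bitno)
{
	if (bitno >= 31)
		return limb;	/* nothing above the top bit to clear */
	return limb & ((UINT32_C(1) << (bitno + 1)) - 1);
}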
/****************
* Shift A by N bits to the right
* FIXME: should use alloc_limb if X and A are the same.
*/
int mpi_rshift(MPI x, MPI a, unsigned n)
{
mpi_ptr_t xp;
mpi_size_t xsize;
xsize = a->nlimbs;
x->sign = a->sign;
if (RESIZE_IF_NEEDED(x, (size_t) xsize) < 0)
return -ENOMEM;
xp = x->d;
if (xsize) {
mpihelp_rshift(xp, a->d, xsize, n);
MPN_NORMALIZE(xp, xsize);
}
x->nlimbs = xsize;
return 0;
}
/****************
* Shift A by COUNT limbs to the left
* This is used only within the MPI library
*/
int mpi_lshift_limbs(MPI a, unsigned int count)
{
const int n = a->nlimbs;
mpi_ptr_t ap;
int i;
if (!count || !n)
return 0;
if (RESIZE_IF_NEEDED(a, n + count) < 0)
return -ENOMEM;
ap = a->d;
for (i = n - 1; i >= 0; i--)
ap[i + count] = ap[i];
for (i = 0; i < count; i++)
ap[i] = 0;
a->nlimbs += count;
return 0;
}
/****************
* Shift A by COUNT limbs to the right
* This is used only within the MPI library
*/
void mpi_rshift_limbs(MPI a, unsigned int count)
{
mpi_ptr_t ap = a->d;
mpi_size_t n = a->nlimbs;
unsigned int i;
if (count >= n) {
a->nlimbs = 0;
return;
}
for (i = 0; i < n - count; i++)
ap[i] = ap[i + count];
ap[i] = 0;
a->nlimbs -= count;
}
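Both limb-granularity shifts are plain array moves with zero fill. A condensed sketch of the right-shift case (hypothetical uint32_t limbs, returning the new limb count rather than updating an MPI in place):

#include <stdint.h>

/* Drop the count least significant limbs of a little-endian array. */
static unsigned int rshift_limbs(uint32_t *d, unsigned int nlimbs,
				 unsigned int count)
{
	unsigned int i;

	if (count >= nlimbs)
		return 0;	/* everything shifted out */
	for (i = 0; i < nlimbs - count; i++)
		d[i] = d[i + count];
	return nlimbs - count;
}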
@@ -73,81 +73,6 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
}
EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
/****************
* Make an mpi from a character string.
*/
int mpi_fromstr(MPI val, const char *str)
{
int hexmode = 0, sign = 0, prepend_zero = 0, i, j, c, c1, c2;
unsigned nbits, nbytes, nlimbs;
mpi_limb_t a;
if (*str == '-') {
sign = 1;
str++;
}
if (*str == '0' && str[1] == 'x')
hexmode = 1;
else
return -EINVAL; /* other bases are not yet supported */
str += 2;
nbits = strlen(str) * 4;
if (nbits % 8)
prepend_zero = 1;
nbytes = (nbits + 7) / 8;
nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
if (val->alloced < nlimbs)
if (!mpi_resize(val, nlimbs))
return -ENOMEM;
i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
i %= BYTES_PER_MPI_LIMB;
j = val->nlimbs = nlimbs;
val->sign = sign;
for (; j > 0; j--) {
a = 0;
for (; i < BYTES_PER_MPI_LIMB; i++) {
if (prepend_zero) {
c1 = '0';
prepend_zero = 0;
} else
c1 = *str++;
assert(c1);
c2 = *str++;
assert(c2);
if (c1 >= '0' && c1 <= '9')
c = c1 - '0';
else if (c1 >= 'a' && c1 <= 'f')
c = c1 - 'a' + 10;
else if (c1 >= 'A' && c1 <= 'F')
c = c1 - 'A' + 10;
else {
mpi_clear(val);
return 1;
}
c <<= 4;
if (c2 >= '0' && c2 <= '9')
c |= c2 - '0';
else if (c2 >= 'a' && c2 <= 'f')
c |= c2 - 'a' + 10;
else if (c2 >= 'A' && c2 <= 'F')
c |= c2 - 'A' + 10;
else {
mpi_clear(val);
return 1;
}
a <<= 8;
a |= c;
}
i = 0;
val->d[j - 1] = a;
}
return 0;
}
EXPORT_SYMBOL_GPL(mpi_fromstr);
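The parser above is strict: an optional leading '-', a mandatory "0x" prefix, then hex digits consumed two at a time, most significant byte first. A condensed user-space sketch of the same digit decoding (hexval and hexbyte are hypothetical helpers, not part of the MPI API):

#include <stdint.h>

/* Decode one hex digit the way mpi_fromstr does; -1 on anything else. */
static int hexval(int c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* Two digits form one byte, high nibble first: "1f" -> 0x1f. */
static int hexbyte(const char *s)
{
	int hi = hexval(s[0]);
	int lo = hexval(s[1]);

	return (hi < 0 || lo < 0) ? -1 : (hi << 4) | lo;
}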
/****************
* Return an allocated buffer with the MPI (msb first).
* NBYTES receives the length of this buffer. Caller must free the
...
@@ -37,159 +37,6 @@
#define UDIV_TIME UMUL_TIME
#endif
/* FIXME: We should be using invert_limb (or invert_normalized_limb)
* here (not udiv_qrnnd).
*/
mpi_limb_t
mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
mpi_limb_t divisor_limb)
{
mpi_size_t i;
mpi_limb_t n1, n0, r;
int dummy;
/* Botch: Should this be handled at all? Rely on callers? */
if (!dividend_size)
return 0;
/* If multiplication is much faster than division, and the
* dividend is large, pre-invert the divisor, and use
* only multiplications in the inner loop.
*
* This test should be read:
* Does it ever help to use udiv_qrnnd_preinv?
* && Does what we save compensate for the inversion overhead?
*/
if (UDIV_TIME > (2 * UMUL_TIME + 6)
&& (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
int normalization_steps;
count_leading_zeros(normalization_steps, divisor_limb);
if (normalization_steps) {
mpi_limb_t divisor_limb_inverted;
divisor_limb <<= normalization_steps;
/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
* result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
* most significant bit (with weight 2**N) implicit.
*
* Special case for DIVISOR_LIMB == 100...000.
*/
if (!(divisor_limb << 1))
divisor_limb_inverted = ~(mpi_limb_t) 0;
else
udiv_qrnnd(divisor_limb_inverted, dummy,
-divisor_limb, 0, divisor_limb);
n1 = dividend_ptr[dividend_size - 1];
r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
/* Possible optimization:
* if (r == 0
* && divisor_limb > ((n1 << normalization_steps)
* | (dividend_ptr[dividend_size - 2] >> ...)))
* ...one division less...
*/
for (i = dividend_size - 2; i >= 0; i--) {
n0 = dividend_ptr[i];
UDIV_QRNND_PREINV(dummy, r, r,
((n1 << normalization_steps)
| (n0 >>
(BITS_PER_MPI_LIMB -
normalization_steps))),
divisor_limb,
divisor_limb_inverted);
n1 = n0;
}
UDIV_QRNND_PREINV(dummy, r, r,
n1 << normalization_steps,
divisor_limb, divisor_limb_inverted);
return r >> normalization_steps;
} else {
mpi_limb_t divisor_limb_inverted;
/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
* result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
* most significant bit (with weight 2**N) implicit.
*
* Special case for DIVISOR_LIMB == 100...000.
*/
if (!(divisor_limb << 1))
divisor_limb_inverted = ~(mpi_limb_t) 0;
else
udiv_qrnnd(divisor_limb_inverted, dummy,
-divisor_limb, 0, divisor_limb);
i = dividend_size - 1;
r = dividend_ptr[i];
if (r >= divisor_limb)
r = 0;
else
i--;
for (; i >= 0; i--) {
n0 = dividend_ptr[i];
UDIV_QRNND_PREINV(dummy, r, r,
n0, divisor_limb,
divisor_limb_inverted);
}
return r;
}
} else {
if (UDIV_NEEDS_NORMALIZATION) {
int normalization_steps;
count_leading_zeros(normalization_steps, divisor_limb);
if (normalization_steps) {
divisor_limb <<= normalization_steps;
n1 = dividend_ptr[dividend_size - 1];
r = n1 >> (BITS_PER_MPI_LIMB -
normalization_steps);
/* Possible optimization:
* if (r == 0
* && divisor_limb > ((n1 << normalization_steps)
* | (dividend_ptr[dividend_size - 2] >> ...)))
* ...one division less...
*/
for (i = dividend_size - 2; i >= 0; i--) {
n0 = dividend_ptr[i];
udiv_qrnnd(dummy, r, r,
((n1 << normalization_steps)
| (n0 >>
(BITS_PER_MPI_LIMB -
normalization_steps))),
divisor_limb);
n1 = n0;
}
udiv_qrnnd(dummy, r, r,
n1 << normalization_steps,
divisor_limb);
return r >> normalization_steps;
}
}
/* No normalization needed, either because udiv_qrnnd doesn't require
* it, or because DIVISOR_LIMB is already normalized. */
i = dividend_size - 1;
r = dividend_ptr[i];
if (r >= divisor_limb)
r = 0;
else
i--;
for (; i >= 0; i--) {
n0 = dividend_ptr[i];
udiv_qrnnd(dummy, r, r, n0, divisor_limb);
}
return r;
}
}
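Stripped of the pre-inversion and normalization machinery, mpihelp_mod_1 is a Horner evaluation of the dividend in base 2^BITS_PER_MPI_LIMB, reducing modulo the divisor at each step. An illustrative sketch with 32-bit limbs and a 64-bit intermediate, relying on hardware 64-by-32 division instead of udiv_qrnnd:

#include <stdint.h>

/* Remainder of a little-endian 32-bit limb array divided by one limb.
 * After each step r < divisor, so (r << 32) | d[i] always fits in 64 bits. */
static uint32_t mod_1(const uint32_t *d, int nlimbs, uint32_t divisor)
{
	uint64_t r = 0;
	int i;

	for (i = nlimbs - 1; i >= 0; i--)	/* most significant limb first */
		r = ((r << 32) | d[i]) % divisor;
	return (uint32_t)r;
}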
/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
* the NSIZE-DSIZE least significant quotient limbs at QP
* and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
@@ -387,159 +234,3 @@ mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
	return most_significant_q_limb;
}
/****************
* Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
* Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
* Return the single-limb remainder.
* There are no constraints on the value of the divisor.
*
* QUOT_PTR and DIVIDEND_PTR might point to the same limb.
*/
mpi_limb_t
mpihelp_divmod_1(mpi_ptr_t quot_ptr,
mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
mpi_limb_t divisor_limb)
{
mpi_size_t i;
mpi_limb_t n1, n0, r;
int dummy;
if (!dividend_size)
return 0;
/* If multiplication is much faster than division, and the
* dividend is large, pre-invert the divisor, and use
* only multiplications in the inner loop.
*
* This test should be read:
* Does it ever help to use udiv_qrnnd_preinv?
* && Does what we save compensate for the inversion overhead?
*/
if (UDIV_TIME > (2 * UMUL_TIME + 6)
&& (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
int normalization_steps;
count_leading_zeros(normalization_steps, divisor_limb);
if (normalization_steps) {
mpi_limb_t divisor_limb_inverted;
divisor_limb <<= normalization_steps;
/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
* result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
* most significant bit (with weight 2**N) implicit.
*/
/* Special case for DIVISOR_LIMB == 100...000. */
if (!(divisor_limb << 1))
divisor_limb_inverted = ~(mpi_limb_t) 0;
else
udiv_qrnnd(divisor_limb_inverted, dummy,
-divisor_limb, 0, divisor_limb);
n1 = dividend_ptr[dividend_size - 1];
r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
/* Possible optimization:
* if (r == 0
* && divisor_limb > ((n1 << normalization_steps)
* | (dividend_ptr[dividend_size - 2] >> ...)))
* ...one division less...
*/
for (i = dividend_size - 2; i >= 0; i--) {
n0 = dividend_ptr[i];
UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
((n1 << normalization_steps)
| (n0 >>
(BITS_PER_MPI_LIMB -
normalization_steps))),
divisor_limb,
divisor_limb_inverted);
n1 = n0;
}
UDIV_QRNND_PREINV(quot_ptr[0], r, r,
n1 << normalization_steps,
divisor_limb, divisor_limb_inverted);
return r >> normalization_steps;
} else {
mpi_limb_t divisor_limb_inverted;
/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
* result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
* most significant bit (with weight 2**N) implicit.
*/
/* Special case for DIVISOR_LIMB == 100...000. */
if (!(divisor_limb << 1))
divisor_limb_inverted = ~(mpi_limb_t) 0;
else
udiv_qrnnd(divisor_limb_inverted, dummy,
-divisor_limb, 0, divisor_limb);
i = dividend_size - 1;
r = dividend_ptr[i];
if (r >= divisor_limb)
r = 0;
else
quot_ptr[i--] = 0;
for (; i >= 0; i--) {
n0 = dividend_ptr[i];
UDIV_QRNND_PREINV(quot_ptr[i], r, r,
n0, divisor_limb,
divisor_limb_inverted);
}
return r;
}
} else {
if (UDIV_NEEDS_NORMALIZATION) {
int normalization_steps;
count_leading_zeros(normalization_steps, divisor_limb);
if (normalization_steps) {
divisor_limb <<= normalization_steps;
n1 = dividend_ptr[dividend_size - 1];
r = n1 >> (BITS_PER_MPI_LIMB -
normalization_steps);
/* Possible optimization:
* if (r == 0
* && divisor_limb > ((n1 << normalization_steps)
* | (dividend_ptr[dividend_size - 2] >> ...)))
* ...one division less...
*/
for (i = dividend_size - 2; i >= 0; i--) {
n0 = dividend_ptr[i];
udiv_qrnnd(quot_ptr[i + 1], r, r,
((n1 << normalization_steps)
| (n0 >>
(BITS_PER_MPI_LIMB -
normalization_steps))),
divisor_limb);
n1 = n0;
}
udiv_qrnnd(quot_ptr[0], r, r,
n1 << normalization_steps,
divisor_limb);
return r >> normalization_steps;
}
}
/* No normalization needed, either because udiv_qrnnd doesn't require
* it, or because DIVISOR_LIMB is already normalized. */
i = dividend_size - 1;
r = dividend_ptr[i];
if (r >= divisor_limb)
r = 0;
else
quot_ptr[i--] = 0;
for (; i >= 0; i--) {
n0 = dividend_ptr[i];
udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
}
return r;
}
}
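mpihelp_divmod_1 extends the same Horner loop to record each quotient limb as it goes. A corresponding sketch under the same 32-bit-limb assumption (quot and d may alias, just as QUOT_PTR and DIVIDEND_PTR may in the original):

#include <stdint.h>

/* Divide a little-endian 32-bit limb array by one limb; the quotient is
 * written limb for limb and the single-limb remainder is returned. */
static uint32_t divmod_1(uint32_t *quot, const uint32_t *d, int nlimbs,
			 uint32_t divisor)
{
	uint64_t acc, r = 0;
	int i;

	for (i = nlimbs - 1; i >= 0; i--) {
		acc = (r << 32) | d[i];
		quot[i] = (uint32_t)(acc / divisor);
		r = acc % divisor;
	}
	return (uint32_t)r;
}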
@@ -330,36 +330,6 @@ mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
	}
}
/* This should be made into an inline function in gmp.h. */
int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
{
if (up == vp) {
if (size < KARATSUBA_THRESHOLD)
mpih_sqr_n_basecase(prodp, up, size);
else {
mpi_ptr_t tspace;
tspace = mpi_alloc_limb_space(2 * size);
if (!tspace)
return -ENOMEM;
mpih_sqr_n(prodp, up, size, tspace);
mpi_free_limb_space(tspace);
}
} else {
if (size < KARATSUBA_THRESHOLD)
mul_n_basecase(prodp, up, vp, size);
else {
mpi_ptr_t tspace;
tspace = mpi_alloc_limb_space(2 * size);
if (!tspace)
return -ENOMEM;
mul_n(prodp, up, vp, size, tspace);
mpi_free_limb_space(tspace);
}
}
return 0;
}
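Below KARATSUBA_THRESHOLD the helpers fall back to quadratic schoolbook multiplication. For illustration, a self-contained base case that stores 16-bit limbs in uint32_t so partial products plus carries cannot overflow 32-bit arithmetic (the kernel's mul_n_basecase operates on full mpi_limb_t words instead):

#include <stdint.h>

/* Schoolbook n-limb by n-limb multiply; prod receives 2n limbs.
 * Max inner value: 0xffff + 0xffff*0xffff + 0xffff == 0xffffffff. */
static void mul_basecase(uint32_t *prod, const uint32_t *u,
			 const uint32_t *v, int n)
{
	int i, j;

	for (i = 0; i < 2 * n; i++)
		prod[i] = 0;
	for (i = 0; i < n; i++) {
		uint32_t carry = 0;

		for (j = 0; j < n; j++) {
			uint32_t t = prod[i + j] + u[i] * v[j] + carry;

			prod[i + j] = t & 0xffff;
			carry = t >> 16;
		}
		prod[i + n] = carry;
	}
}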
int
mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
			   mpi_ptr_t up, mpi_size_t usize,
...
@@ -106,13 +106,6 @@ int mpi_resize(MPI a, unsigned nlimbs)
	return 0;
}
void mpi_clear(MPI a)
{
a->nlimbs = 0;
a->nbits = 0;
a->flags = 0;
}
void mpi_free(MPI a)
{
	if (!a)
@@ -128,84 +121,3 @@ void mpi_free(MPI a)
	kfree(a);
}
EXPORT_SYMBOL_GPL(mpi_free);
/****************
* Note: This copy function should not interpret the MPI
* but copy it transparently.
*/
int mpi_copy(MPI *copied, const MPI a)
{
size_t i;
MPI b;
*copied = NULL;
if (a) {
b = mpi_alloc(a->nlimbs);
if (!b)
return -ENOMEM;
b->nlimbs = a->nlimbs;
b->sign = a->sign;
b->flags = a->flags;
b->nbits = a->nbits;
for (i = 0; i < b->nlimbs; i++)
b->d[i] = a->d[i];
*copied = b;
}
return 0;
}
int mpi_set(MPI w, const MPI u)
{
mpi_ptr_t wp, up;
mpi_size_t usize = u->nlimbs;
int usign = u->sign;
if (RESIZE_IF_NEEDED(w, (size_t) usize) < 0)
return -ENOMEM;
wp = w->d;
up = u->d;
MPN_COPY(wp, up, usize);
w->nlimbs = usize;
w->nbits = u->nbits;
w->flags = u->flags;
w->sign = usign;
return 0;
}
int mpi_set_ui(MPI w, unsigned long u)
{
if (RESIZE_IF_NEEDED(w, 1) < 0)
return -ENOMEM;
w->d[0] = u;
w->nlimbs = u ? 1 : 0;
w->sign = 0;
w->nbits = 0;
w->flags = 0;
return 0;
}
MPI mpi_alloc_set_ui(unsigned long u)
{
MPI w = mpi_alloc(1);
if (!w)
return w;
w->d[0] = u;
w->nlimbs = u ? 1 : 0;
w->sign = 0;
return w;
}
void mpi_swap(MPI a, MPI b)
{
struct gcry_mpi tmp;
tmp = *a;
*a = *b;
*b = tmp;
}
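mpi_swap exchanges the two MPI headers by structure assignment, so the limb arrays change owners in O(1) without copying any limb data; contrast mpi_copy above, which duplicates the limbs. A toy sketch of the same pattern with a hypothetical stand-in struct:

#include <stdint.h>

struct bignum {		/* stand-in for struct gcry_mpi */
	uint32_t *d;	/* limb array */
	int nlimbs;
	int sign;
};

/* Swap by value: the pointers trade places, the limb data never moves. */
static void bignum_swap(struct bignum *a, struct bignum *b)
{
	struct bignum tmp = *a;

	*a = *b;
	*b = tmp;
}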