root/common/gcc-millicode/muldi3.c

DEFINITIONS

This source file includes the following definitions:
  1. __muldi3
  2. __lmulq

/*-
 * Copyright (c) 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From:
 *      @(#)muldi3.c    8.1 (Berkeley) 6/4/93
 *      NetBSD: muldi3.c,v 1.1 2005/12/20 19:28:51 christos Exp
 */

#include "longlong.h"

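/*
 * longlong.h is expected to supply the word-splitting machinery used
 * throughout this file: the union uu type overlaying a long long with
 * a pair of unsigned ints, the H and L indices selecting the high and
 * low word of that pair, and the HHALF(), LHALF(), and LHUP()
 * half-word macros.  The exact definitions live in that header.
 */
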
/*
 * Multiply two long longs.
 *
 * Our algorithm is based on the following.  Split incoming long long
 * values u and v (where u,v >= 0) into
 *
 *      u = 2^n u1  +  u0       (n = number of bits in `unsigned int', usu. 32)
 *
 * and
 *
 *      v = 2^n v1  +  v0
 *
 * Then
 *
 *      uv = 2^2n u1 v1  +  2^n u1 v0  +  2^n v1 u0  +  u0 v0
 *         = 2^2n u1 v1  +     2^n (u1 v0 + v1 u0)   +  u0 v0
 *
 * Now add 2^n u1 v1 to the first term and subtract it from the middle,
 * and add 2^n u0 v0 to the last term and subtract it from the middle.
 * This gives:
 *
 *      uv = (2^2n + 2^n) (u1 v1)  +
 *               (2^n)    (u1 v0 - u1 v1 + u0 v1 - u0 v0)  +
 *             (2^n + 1)  (u0 v0)
 *
 * Factoring the middle a bit gives us:
 *
 *      uv = (2^2n + 2^n) (u1 v1)  +                    [u1v1 = high]
 *               (2^n)    (u1 - u0) (v0 - v1)  +        [(u1-u0)... = mid]
 *             (2^n + 1)  (u0 v0)                       [u0v0 = low]
 *
 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
 * in just half the precision of the original.  (Note that either or both
 * of (u1 - u0) or (v0 - v1) may be negative.)
 *
 * This algorithm is from Knuth vol. 2 (2nd ed), section 4.3.3, p. 278.
 *
 * Since C does not give us an `int * int = long long' operator, we split
 * our input long longs into two ints, then split the two ints into two
 * shorts.  We can then calculate `short * short = int' in native
 * arithmetic.
 *
 * Our product should, strictly speaking, be a `long long long', with
 * 128 bits, but we are going to discard the upper 64.  In other words,
 * we are not interested in uv, but rather in (uv mod 2^2n).  This
 * makes some of the terms above vanish, and we get:
 *
 *      (2^n)(high) + (2^n)(mid) + (2^n + 1)(low)
 *
 * or
 *
 *      (2^n)(high + mid + low) + low
 *
 * Furthermore, `high' and `mid' can be computed mod 2^n, as any factor
 * of 2^n in either one will also vanish.  Only `low' need be computed
 * mod 2^2n, and only because of the final term above.
 */
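
/*
 * A minimal numeric sketch of the identity above; not part of the
 * original millicode.  The MULDI3_DEMO guard and muldi3_demo() are
 * illustrative only.  It checks the three-multiply decomposition for
 * one pair of values, using n == 16 so the full 2^2n product fits in
 * host 64-bit arithmetic.
 */
#ifdef MULDI3_DEMO
#include <assert.h>

static void
muldi3_demo(void)
{
        /* split 32-bit inputs into 16-bit halves (n == 16) */
        long long u = 0x12345678LL, v = 0x0fedcba9LL;
        long long u1 = u >> 16, u0 = u & 0xffff;
        long long v1 = v >> 16, v0 = v & 0xffff;
        long long high = u1 * v1;               /* u1 v1 */
        long long low = u0 * v0;                /* u0 v0 */
        long long mid = (u1 - u0) * (v0 - v1);  /* may be negative */

        /* uv = (2^2n + 2^n)(high) + (2^n)(mid) + (2^n + 1)(low) */
        assert((high << 32) + (high << 16) + mid * (1LL << 16) +
            (low << 16) + low == u * v);
}
#endif /* MULDI3_DEMO */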
static long long __lmulq(unsigned int, unsigned int);

long long
__muldi3(long long a, long long b)
{
        union uu u, v, low, prod;
        unsigned int high, mid, udiff, vdiff;
        int negall, negmid;
#define u1      u.ui[H]
#define u0      u.ui[L]
#define v1      v.ui[H]
#define v0      v.ui[L]

        /*
         * Get u and v such that u, v >= 0.  When this is finished,
         * u1, u0, v1, and v0 will be directly accessible through the
         * int fields.
         */
        if (a >= 0)
                u.ll = a, negall = 0;
        else
                u.ll = -a, negall = 1;
        if (b >= 0)
                v.ll = b;
        else
                v.ll = -b, negall ^= 1;

        if (u1 == 0 && v1 == 0) {
                /*
                 * An (I hope) important optimization occurs when u1 and v1
                 * are both 0.  This should be common since most numbers
                 * are small.  Here the product is just u0*v0.
                 */
                prod.ll = __lmulq(u0, v0);
        } else {
                /*
                 * Compute the three intermediate products, remembering
                 * whether the middle term is negative.  We can discard
                 * any upper bits in high and mid, so we can use native
                 * unsigned int * unsigned int => unsigned int arithmetic.
                 */
                low.ll = __lmulq(u0, v0);

                if (u1 >= u0)
                        negmid = 0, udiff = u1 - u0;
                else
                        negmid = 1, udiff = u0 - u1;
                if (v0 >= v1)
                        vdiff = v0 - v1;
                else
                        vdiff = v1 - v0, negmid ^= 1;
                mid = udiff * vdiff;

                high = u1 * v1;

                /*
                 * Assemble the final product.
                 */
                prod.ui[H] = high + (negmid ? -mid : mid) + low.ui[L] +
                    low.ui[H];
                prod.ui[L] = low.ui[L];
        }
        return (negall ? -prod.ll : prod.ll);
#undef u1
#undef u0
#undef v1
#undef v0
}
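
/*
 * Usage sketch, not part of the original file: on a 32-bit target the
 * compiler typically emits calls to __muldi3 for `long long' multiply,
 * so the routine is rarely called by name.  A direct call looks like
 * this; the MULDI3_DEMO guard is illustrative only.
 */
#ifdef MULDI3_DEMO
#include <assert.h>

static void
muldi3_usage(void)
{
        /* signs are stripped up front, so mixed signs work */
        assert(__muldi3(100000LL, -300000LL) == -30000000000LL);
        assert(__muldi3(-1LL, -1LL) == 1LL);
        /* only the low 64 bits of the true product are returned */
        assert(__muldi3(1LL << 32, 1LL << 32) == 0LL);
}
#endif /* MULDI3_DEMO */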

/*
 * Multiply two 2N-bit ints to produce a 4N-bit long long, where N is
 * half the number of bits in an int (whatever that is---the code
 * below does not care as long as the header file does its part of the
 * bargain---but typically N==16).
 *
 * We use the same algorithm from Knuth, but this time the modulo refinement
 * does not apply.  On the other hand, since N is half the size of an int,
 * we can get away with native multiplication---none of our input terms
 * exceeds (UINT_MAX >> 1).
 *
 * Note that, for unsigned int l, the quad-precision (long long) result
 *
 *      l << N
 *
 * splits into high and low ints as HHALF(l) and LHUP(l) respectively.
 */
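/*
 * A quick check of that split, illustrative only: the MULDI3_DEMO
 * guard and lmulq_split_demo() are not part of the original source;
 * union uu, H, L, HHALF(), and LHUP() come from longlong.h.
 */
#ifdef MULDI3_DEMO
#include <assert.h>

static void
lmulq_split_demo(void)
{
        /*
         * For N == 16, l == 0x00012345 gives HHALF(l) == 0x0001
         * and LHUP(l) == 0x23450000.
         */
        unsigned int l = 0x00012345;
        union uu s;

        s.ll = (long long)l << 16;
        assert(s.ui[H] == HHALF(l));
        assert(s.ui[L] == LHUP(l));
}
#endif /* MULDI3_DEMO */
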
static long long
__lmulq(unsigned int u, unsigned int v)
{
        unsigned int u1, u0, v1, v0, udiff, vdiff, high, mid, low;
        unsigned int prodh, prodl, was;
        union uu prod;
        int neg;

        u1 = HHALF(u);
        u0 = LHALF(u);
        v1 = HHALF(v);
        v0 = LHALF(v);

        low = u0 * v0;

        /* This is the same small-number optimization as before. */
        if (u1 == 0 && v1 == 0)
                return (low);

        if (u1 >= u0)
                udiff = u1 - u0, neg = 0;
        else
                udiff = u0 - u1, neg = 1;
        if (v0 >= v1)
                vdiff = v0 - v1;
        else
                vdiff = v1 - v0, neg ^= 1;
        mid = udiff * vdiff;

        high = u1 * v1;

        /* prod = (high << 2N) + (high << N); */
        prodh = high + HHALF(high);
        prodl = LHUP(high);

        /*
         * if (neg) prod -= mid << N; else prod += mid << N;
         * Unsigned wraparound of prodl, compared against its saved
         * value `was', detects the borrow or carry into prodh.
         */
        if (neg) {
                was = prodl;
                prodl -= LHUP(mid);
                prodh -= HHALF(mid) + (prodl > was);
        } else {
                was = prodl;
                prodl += LHUP(mid);
                prodh += HHALF(mid) + (prodl < was);
        }

        /* prod += low << N */
        was = prodl;
        prodl += LHUP(low);
        prodh += HHALF(low) + (prodl < was);
        /* ... + low; */
        if ((prodl += low) < low)
                prodh++;

        /* return 4N-bit product */
        prod.ui[H] = prodh;
        prod.ui[L] = prodl;
        return (prod.ll);
}
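
/*
 * Self-test sketch, not part of the original millicode: compares both
 * routines against the host compiler's native 64-bit multiply.  The
 * MULDI3_DEMO guard and main() are illustrative and assume a hosted
 * environment.
 */
#ifdef MULDI3_DEMO
#include <stdio.h>

int
main(void)
{
        unsigned int a = 0xdeadbeef, b = 0x12345678;
        long long want = (long long)((unsigned long long)a * b);

        if (__lmulq(a, b) != want)
                printf("__lmulq mismatch\n");
        if (__muldi3(0x123456789LL, -42LL) != 0x123456789LL * -42LL)
                printf("__muldi3 mismatch\n");

        muldi3_demo();
        muldi3_usage();
        lmulq_split_demo();
        printf("muldi3 self-test done\n");
        return (0);
}
#endif /* MULDI3_DEMO */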
