/* glibc/math/k_casinhl.c */
/* Return arc hyperbolic sine for long double value, with the imaginary
part of the result possibly adjusted for use in computing other
functions.
Copyright (C) 1997-2016 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <complex.h>
#include <math.h>
#include <math_private.h>
#include <float.h>
/* To avoid spurious overflows, use this definition to treat IBM long
double as approximating an IEEE-style format. */
#if LDBL_MANT_DIG == 106
# undef LDBL_EPSILON
# define LDBL_EPSILON 0x1p-106L
#endif
/* Return the complex inverse hyperbolic sine of finite nonzero Z,
with the imaginary part of the result subtracted from pi/2 if ADJ
is nonzero. */
/* Return the complex inverse hyperbolic sine of finite nonzero X,
   with the imaginary part of the result subtracted from pi/2 if ADJ
   is nonzero (used by the casin/cacos family of functions).  The
   computation works in the first quadrant (rx = |Re x|, ix = |Im x|)
   and restores the argument's signs at the end.  */
__complex__ long double
__kernel_casinhl (__complex__ long double x, int adj)
{
  __complex__ long double res;
  long double rx, ix;
  __complex__ long double y;

  /* Avoid cancellation by reducing to the first quadrant.  */
  rx = fabsl (__real__ x);
  ix = fabsl (__imag__ x);

  if (rx >= 1.0L / LDBL_EPSILON || ix >= 1.0L / LDBL_EPSILON)
    {
      /* For large x in the first quadrant, x + csqrt (1 + x * x)
	 is sufficiently close to 2 * x to make no significant
	 difference to the result; avoid possible overflow from
	 the squaring and addition.  */
      __real__ y = rx;
      __imag__ y = ix;

      if (adj)
	{
	  /* Swap real and imaginary parts, giving the real part the
	     sign of the original imaginary part.  */
	  long double t = __real__ y;
	  __real__ y = __copysignl (__imag__ y, __imag__ x);
	  __imag__ y = t;
	}

      res = __clogl (y);
      __real__ res += M_LN2l;	/* log (2 * x) = log 2 + log x.  */
    }
  else if (rx >= 0.5L && ix < LDBL_EPSILON / 8.0L)
    {
      /* Close to the real axis with |Re x| not small: the real
	 asinh formula log (rx + sqrt (1 + rx^2)) is accurate.  */
      long double s = __ieee754_hypotl (1.0L, rx);

      __real__ res = __ieee754_logl (rx + s);
      if (adj)
	__imag__ res = __ieee754_atan2l (s, __imag__ x);
      else
	__imag__ res = __ieee754_atan2l (ix, s);
    }
  else if (rx < LDBL_EPSILON / 8.0L && ix >= 1.5L)
    {
      /* Close to the imaginary axis with |Im x| >= 1.5: use
	 log (ix + sqrt (ix^2 - 1)); (ix+1)(ix-1) avoids cancellation
	 in computing ix^2 - 1.  */
      long double s = __ieee754_sqrtl ((ix + 1.0L) * (ix - 1.0L));

      __real__ res = __ieee754_logl (ix + s);
      if (adj)
	__imag__ res = __ieee754_atan2l (rx, __copysignl (s, __imag__ x));
      else
	__imag__ res = __ieee754_atan2l (s, rx);
    }
  else if (ix > 1.0L && ix < 1.5L && rx < 0.5L)
    {
      /* 1 < |Im x| < 1.5, |Re x| < 0.5: near the branch point i;
	 compute via log1p to keep accuracy for results near 0.  */
      if (rx < LDBL_EPSILON * LDBL_EPSILON)
	{
	  long double ix2m1 = (ix + 1.0L) * (ix - 1.0L);
	  long double s = __ieee754_sqrtl (ix2m1);

	  __real__ res = __log1pl (2.0L * (ix2m1 + ix * s)) / 2.0L;
	  if (adj)
	    __imag__ res = __ieee754_atan2l (rx, __copysignl (s, __imag__ x));
	  else
	    __imag__ res = __ieee754_atan2l (s, rx);
	}
      else
	{
	  long double ix2m1 = (ix + 1.0L) * (ix - 1.0L);
	  long double rx2 = rx * rx;
	  long double f = rx2 * (2.0L + rx2 + 2.0L * ix * ix);
	  long double d = __ieee754_sqrtl (ix2m1 * ix2m1 + f);
	  long double dp = d + ix2m1;
	  long double dm = f / dp;
	  long double r1 = __ieee754_sqrtl ((dm + rx2) / 2.0L);
	  long double r2 = rx * ix / r1;

	  __real__ res
	    = __log1pl (rx2 + dp + 2.0L * (rx * r1 + ix * r2)) / 2.0L;
	  if (adj)
	    __imag__ res = __ieee754_atan2l (rx + r1, __copysignl (ix + r2,
								   __imag__ x));
	  else
	    __imag__ res = __ieee754_atan2l (ix + r2, rx + r1);
	}
    }
  else if (ix == 1.0L && rx < 0.5L)
    {
      /* Exactly at the branch point i with small real offset.  */
      if (rx < LDBL_EPSILON / 8.0L)
	{
	  __real__ res = __log1pl (2.0L * (rx + __ieee754_sqrtl (rx))) / 2.0L;
	  if (adj)
	    __imag__ res = __ieee754_atan2l (__ieee754_sqrtl (rx),
					     __copysignl (1.0L, __imag__ x));
	  else
	    __imag__ res = __ieee754_atan2l (1.0L, __ieee754_sqrtl (rx));
	}
      else
	{
	  long double d = rx * __ieee754_sqrtl (4.0L + rx * rx);
	  long double s1 = __ieee754_sqrtl ((d + rx * rx) / 2.0L);
	  long double s2 = __ieee754_sqrtl ((d - rx * rx) / 2.0L);

	  __real__ res = __log1pl (rx * rx + d + 2.0L * (rx * s1 + s2)) / 2.0L;
	  if (adj)
	    __imag__ res = __ieee754_atan2l (rx + s1,
					     __copysignl (1.0L + s2,
							  __imag__ x));
	  else
	    __imag__ res = __ieee754_atan2l (1.0L + s2, rx + s1);
	}
    }
  else if (ix < 1.0L && rx < 0.5L)
    {
      /* |Im x| < 1, |Re x| < 0.5: result may be tiny; use log1p
	 throughout and force underflow exceptions where needed.  */
      if (ix >= LDBL_EPSILON)
	{
	  if (rx < LDBL_EPSILON * LDBL_EPSILON)
	    {
	      long double onemix2 = (1.0L + ix) * (1.0L - ix);
	      long double s = __ieee754_sqrtl (onemix2);

	      __real__ res = __log1pl (2.0L * rx / s) / 2.0L;
	      if (adj)
		__imag__ res = __ieee754_atan2l (s, __imag__ x);
	      else
		__imag__ res = __ieee754_atan2l (ix, s);
	    }
	  else
	    {
	      long double onemix2 = (1.0L + ix) * (1.0L - ix);
	      long double rx2 = rx * rx;
	      long double f = rx2 * (2.0L + rx2 + 2.0L * ix * ix);
	      long double d = __ieee754_sqrtl (onemix2 * onemix2 + f);
	      long double dp = d + onemix2;
	      long double dm = f / dp;
	      long double r1 = __ieee754_sqrtl ((dp + rx2) / 2.0L);
	      long double r2 = rx * ix / r1;

	      __real__ res
		= __log1pl (rx2 + dm + 2.0L * (rx * r1 + ix * r2)) / 2.0L;
	      if (adj)
		__imag__ res = __ieee754_atan2l (rx + r1,
						 __copysignl (ix + r2,
							      __imag__ x));
	      else
		__imag__ res = __ieee754_atan2l (ix + r2, rx + r1);
	    }
	}
      else
	{
	  /* |Im x| negligible: reduce to the real asinh formula.  */
	  long double s = __ieee754_hypotl (1.0L, rx);

	  __real__ res = __log1pl (2.0L * rx * (rx + s)) / 2.0L;
	  if (adj)
	    __imag__ res = __ieee754_atan2l (s, __imag__ x);
	  else
	    __imag__ res = __ieee754_atan2l (ix, s);
	}
      /* The real part may have underflowed without raising the
	 underflow exception; force it if so.  */
      math_check_force_underflow_nonneg (__real__ res);
    }
  else
    {
      /* General case: asinh (x) = log (x + sqrt (x^2 + 1)), with the
	 square computed so that the real part does not cancel.  */
      __real__ y = (rx - ix) * (rx + ix) + 1.0L;
      __imag__ y = 2.0L * rx * ix;

      y = __csqrtl (y);

      __real__ y += rx;
      __imag__ y += ix;

      if (adj)
	{
	  long double t = __real__ y;
	  __real__ y = __copysignl (__imag__ y, __imag__ x);
	  __imag__ y = t;
	}

      res = __clogl (y);
    }

  /* Give results the correct sign for the original argument.  */
  __real__ res = __copysignl (__real__ res, __real__ x);
  __imag__ res = __copysignl (__imag__ res, (adj ? 1.0L : __imag__ x));

  return res;
}