path: root/2.3-1/src/fortran/lapack/zhgeqz.f
Diffstat (limited to '2.3-1/src/fortran/lapack/zhgeqz.f')
-rw-r--r--  2.3-1/src/fortran/lapack/zhgeqz.f  759
1 files changed, 759 insertions, 0 deletions
diff --git a/2.3-1/src/fortran/lapack/zhgeqz.f b/2.3-1/src/fortran/lapack/zhgeqz.f
new file mode 100644
index 00000000..6a9403bd
--- /dev/null
+++ b/2.3-1/src/fortran/lapack/zhgeqz.f
@@ -0,0 +1,759 @@
+ SUBROUTINE ZHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT,
+ $ ALPHA, BETA, Q, LDQ, Z, LDZ, WORK, LWORK,
+ $ RWORK, INFO )
+*
+* -- LAPACK routine (version 3.1) --
+* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
+* November 2006
+*
+* .. Scalar Arguments ..
+ CHARACTER COMPQ, COMPZ, JOB
+ INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION RWORK( * )
+ COMPLEX*16 ALPHA( * ), BETA( * ), H( LDH, * ),
+ $ Q( LDQ, * ), T( LDT, * ), WORK( * ),
+ $ Z( LDZ, * )
+* ..
+*
+* Purpose
+* =======
+*
+* ZHGEQZ computes the eigenvalues of a complex matrix pair (H,T),
+* where H is an upper Hessenberg matrix and T is upper triangular,
+* using the single-shift QZ method.
+* Matrix pairs of this type are produced by the reduction to
+* generalized upper Hessenberg form of a complex matrix pair (A,B):
+*
+* A = Q1*H*Z1**H, B = Q1*T*Z1**H,
+*
+* as computed by ZGGHRD.
+*
+* If JOB='S', then the Hessenberg-triangular pair (H,T) is
+* also reduced to generalized Schur form,
+*
+* H = Q*S*Z**H, T = Q*P*Z**H,
+*
+* where Q and Z are unitary matrices and S and P are upper triangular.
+*
+* Optionally, the unitary matrix Q from the generalized Schur
+* factorization may be postmultiplied into an input matrix Q1, and the
+* unitary matrix Z may be postmultiplied into an input matrix Z1.
+* If Q1 and Z1 are the unitary matrices from ZGGHRD that reduced
+* the matrix pair (A,B) to generalized Hessenberg form, then the output
+* matrices Q1*Q and Z1*Z are the unitary factors from the generalized
+* Schur factorization of (A,B):
+*
+* A = (Q1*Q)*S*(Z1*Z)**H, B = (Q1*Q)*P*(Z1*Z)**H.
+*
+* To avoid overflow, eigenvalues of the matrix pair (H,T)
+* (equivalently, of (A,B)) are computed as a pair of complex values
+* (alpha,beta). If beta is nonzero, lambda = alpha / beta is an
+* eigenvalue of the generalized nonsymmetric eigenvalue problem (GNEP)
+* A*x = lambda*B*x
+* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the
+* alternate form of the GNEP
+* mu*A*y = B*y.
+* The values of alpha and beta for the i-th eigenvalue can be read
+* directly from the generalized Schur form: alpha = S(i,i),
+* beta = P(i,i).
+*
+* Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix
+* Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973),
+* pp. 241--256.
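+*
+* A typical calling sequence, sketched under the assumption that
+* the full pair (A,B) is reduced (ILO = 1, IHI = N), that the
+* reduced pair overwrites A and B, and that LDA, LDB are the
+* caller's leading dimensions:
+*
+* CALL ZGGHRD( 'I', 'I', N, 1, N, A, LDA, B, LDB, Q, LDQ,
+* $ Z, LDZ, INFO )
+* CALL ZHGEQZ( 'S', 'V', 'V', N, 1, N, A, LDA, B, LDB, ALPHA,
+* $ BETA, Q, LDQ, Z, LDZ, WORK, LWORK, RWORK, INFO )
+*
+* with LWORK at least N. ZGGBAL may be used beforehand to obtain
+* a tighter ILO and IHI.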
+*
+* Arguments
+* =========
+*
+* JOB (input) CHARACTER*1
+* = 'E': Compute eigenvalues only;
+* = 'S': Compute eigenvalues and the Schur form.
+*
+* COMPQ (input) CHARACTER*1
+* = 'N': Left Schur vectors (Q) are not computed;
+* = 'I': Q is initialized to the unit matrix and the matrix Q
+* of left Schur vectors of (H,T) is returned;
+* = 'V': Q must contain a unitary matrix Q1 on entry and
+* the product Q1*Q is returned.
+*
+* COMPZ (input) CHARACTER*1
+* = 'N': Right Schur vectors (Z) are not computed;
+* = 'I': Z is initialized to the unit matrix and the matrix Z
+* of right Schur vectors of (H,T) is returned;
+* = 'V': Z must contain a unitary matrix Z1 on entry and
+* the product Z1*Z is returned.
+*
+* N (input) INTEGER
+* The order of the matrices H, T, Q, and Z. N >= 0.
+*
+* ILO (input) INTEGER
+* IHI (input) INTEGER
+* ILO and IHI mark the rows and columns of H which are in
+* Hessenberg form. It is assumed that H is already upper
+* triangular in rows and columns 1:ILO-1 and IHI+1:N.
+* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0.
+*
+* H (input/output) COMPLEX*16 array, dimension (LDH, N)
+* On entry, the N-by-N upper Hessenberg matrix H.
+* On exit, if JOB = 'S', H contains the upper triangular
+* matrix S from the generalized Schur factorization.
+* If JOB = 'E', the diagonal of H matches that of S, but
+* the rest of H is unspecified.
+*
+* LDH (input) INTEGER
+* The leading dimension of the array H. LDH >= max( 1, N ).
+*
+* T (input/output) COMPLEX*16 array, dimension (LDT, N)
+* On entry, the N-by-N upper triangular matrix T.
+* On exit, if JOB = 'S', T contains the upper triangular
+* matrix P from the generalized Schur factorization.
+* If JOB = 'E', the diagonal of T matches that of P, but
+* the rest of T is unspecified.
+*
+* LDT (input) INTEGER
+* The leading dimension of the array T. LDT >= max( 1, N ).
+*
+* ALPHA (output) COMPLEX*16 array, dimension (N)
+* The complex scalars alpha that define the eigenvalues of
+* GNEP. ALPHA(i) = S(i,i) in the generalized Schur
+* factorization.
+*
+* BETA (output) COMPLEX*16 array, dimension (N)
+* The real non-negative scalars beta that define the
+* eigenvalues of GNEP. BETA(i) = P(i,i) in the generalized
+* Schur factorization.
+*
+* Together, the quantities alpha = ALPHA(j) and beta = BETA(j)
+* represent the j-th eigenvalue of the matrix pair (A,B), in
+* one of the forms lambda = alpha/beta or mu = beta/alpha.
+* Since either lambda or mu may overflow, they should not,
+* in general, be computed.
+*
+* Q (input/output) COMPLEX*16 array, dimension (LDQ, N)
+* On entry, if COMPQ = 'V', the unitary matrix Q1 used in the
+* reduction of (A,B) to generalized Hessenberg form.
+* On exit, if COMPQ = 'I', the unitary matrix of left Schur
+* vectors of (H,T), and if COMPQ = 'V', the unitary matrix of
+* left Schur vectors of (A,B).
+* Not referenced if COMPQ = 'N'.
+*
+* LDQ (input) INTEGER
+* The leading dimension of the array Q. LDQ >= 1.
+* If COMPQ='V' or 'I', then LDQ >= N.
+*
+* Z (input/output) COMPLEX*16 array, dimension (LDZ, N)
+* On entry, if COMPZ = 'V', the unitary matrix Z1 used in the
+* reduction of (A,B) to generalized Hessenberg form.
+* On exit, if COMPZ = 'I', the unitary matrix of right Schur
+* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of
+* right Schur vectors of (A,B).
+* Not referenced if COMPZ = 'N'.
+*
+* LDZ (input) INTEGER
+* The leading dimension of the array Z. LDZ >= 1.
+* If COMPZ='V' or 'I', then LDZ >= N.
+*
+* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK))
+* On exit, if INFO >= 0, WORK(1) returns the optimal LWORK.
+*
+* LWORK (input) INTEGER
+* The dimension of the array WORK. LWORK >= max(1,N).
+*
+* If LWORK = -1, then a workspace query is assumed (a sketch of
+* the query follows the INFO description below); the routine
+* only calculates the optimal size of the WORK array, returns
+* this value as the first entry of the WORK array, and no error
+* message related to LWORK is issued by XERBLA.
+*
+* RWORK (workspace) DOUBLE PRECISION array, dimension (N)
+*
+* INFO (output) INTEGER
+* = 0: successful exit
+* < 0: if INFO = -i, the i-th argument had an illegal value
+* = 1,...,N: the QZ iteration did not converge. (H,T) is not
+* in Schur form, but ALPHA(i) and BETA(i),
+* i=INFO+1,...,N should be correct.
+* = N+1,...,2*N: the shift calculation failed. (H,T) is not
+* in Schur form, but ALPHA(i) and BETA(i),
+* i=INFO-N+1,...,N should be correct.
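+*
+* A workspace query is typically used in a two-call pattern (a
+* sketch; LWKOPT is a caller-side variable, not an argument of
+* this routine):
+*
+* CALL ZHGEQZ( 'S', 'V', 'V', N, ILO, IHI, H, LDH, T, LDT,
+* $ ALPHA, BETA, Q, LDQ, Z, LDZ, WORK, -1, RWORK, INFO )
+* LWKOPT = INT( WORK( 1 ) )
+* ( allocate WORK with at least LWKOPT entries, then repeat the
+* call with LWORK = LWKOPT )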
+*
+* Further Details
+* ===============
+*
+* We assume that complex ABS works as long as its value is less than
+* overflow.
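+*
+* Eigenvalues may be recovered from ALPHA and BETA with a guard
+* against overflow of the ratio (a sketch; LAMBDA is a caller-side
+* array, not an argument of this routine, and entries with
+* near-zero BETA -- infinite or indeterminate eigenvalues -- are
+* simply skipped):
+*
+* DO 5 J = 1, N
+* IF( ABS( BETA( J ) ).GT.DLAMCH( 'S' )*ABS( ALPHA( J ) ) )
+* $ LAMBDA( J ) = ALPHA( J ) / BETA( J )
+* 5 CONTINUE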
+*
+* =====================================================================
+*
+* .. Parameters ..
+ COMPLEX*16 CZERO, CONE
+ PARAMETER ( CZERO = ( 0.0D+0, 0.0D+0 ),
+ $ CONE = ( 1.0D+0, 0.0D+0 ) )
+ DOUBLE PRECISION ZERO, ONE
+ PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 )
+ DOUBLE PRECISION HALF
+ PARAMETER ( HALF = 0.5D+0 )
+* ..
+* .. Local Scalars ..
+ LOGICAL ILAZR2, ILAZRO, ILQ, ILSCHR, ILZ, LQUERY
+ INTEGER ICOMPQ, ICOMPZ, IFIRST, IFRSTM, IITER, ILAST,
+ $ ILASTM, IN, ISCHUR, ISTART, J, JC, JCH, JITER,
+ $ JR, MAXIT
+ DOUBLE PRECISION ABSB, ANORM, ASCALE, ATOL, BNORM, BSCALE, BTOL,
+ $ C, SAFMIN, TEMP, TEMP2, TEMPR, ULP
+ COMPLEX*16 ABI22, AD11, AD12, AD21, AD22, CTEMP, CTEMP2,
+ $ CTEMP3, ESHIFT, RTDISC, S, SHIFT, SIGNBC, T1,
+ $ U12, X
+* ..
+* .. External Functions ..
+ LOGICAL LSAME
+ DOUBLE PRECISION DLAMCH, ZLANHS
+ EXTERNAL LSAME, DLAMCH, ZLANHS
+* ..
+* .. External Subroutines ..
+ EXTERNAL XERBLA, ZLARTG, ZLASET, ZROT, ZSCAL
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS, DBLE, DCMPLX, DCONJG, DIMAG, MAX, MIN,
+ $ SQRT
+* ..
+* .. Statement Functions ..
+ DOUBLE PRECISION ABS1
+* ..
+* .. Statement Function definitions ..
+ ABS1( X ) = ABS( DBLE( X ) ) + ABS( DIMAG( X ) )
+* ..
+* .. Executable Statements ..
+*
+* Decode JOB, COMPQ, COMPZ
+*
+ IF( LSAME( JOB, 'E' ) ) THEN
+ ILSCHR = .FALSE.
+ ISCHUR = 1
+ ELSE IF( LSAME( JOB, 'S' ) ) THEN
+ ILSCHR = .TRUE.
+ ISCHUR = 2
+ ELSE
+ ISCHUR = 0
+ END IF
+*
+ IF( LSAME( COMPQ, 'N' ) ) THEN
+ ILQ = .FALSE.
+ ICOMPQ = 1
+ ELSE IF( LSAME( COMPQ, 'V' ) ) THEN
+ ILQ = .TRUE.
+ ICOMPQ = 2
+ ELSE IF( LSAME( COMPQ, 'I' ) ) THEN
+ ILQ = .TRUE.
+ ICOMPQ = 3
+ ELSE
+ ICOMPQ = 0
+ END IF
+*
+ IF( LSAME( COMPZ, 'N' ) ) THEN
+ ILZ = .FALSE.
+ ICOMPZ = 1
+ ELSE IF( LSAME( COMPZ, 'V' ) ) THEN
+ ILZ = .TRUE.
+ ICOMPZ = 2
+ ELSE IF( LSAME( COMPZ, 'I' ) ) THEN
+ ILZ = .TRUE.
+ ICOMPZ = 3
+ ELSE
+ ICOMPZ = 0
+ END IF
+*
+* Check Argument Values
+*
+ INFO = 0
+ WORK( 1 ) = MAX( 1, N )
+ LQUERY = ( LWORK.EQ.-1 )
+ IF( ISCHUR.EQ.0 ) THEN
+ INFO = -1
+ ELSE IF( ICOMPQ.EQ.0 ) THEN
+ INFO = -2
+ ELSE IF( ICOMPZ.EQ.0 ) THEN
+ INFO = -3
+ ELSE IF( N.LT.0 ) THEN
+ INFO = -4
+ ELSE IF( ILO.LT.1 ) THEN
+ INFO = -5
+ ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN
+ INFO = -6
+ ELSE IF( LDH.LT.N ) THEN
+ INFO = -8
+ ELSE IF( LDT.LT.N ) THEN
+ INFO = -10
+ ELSE IF( LDQ.LT.1 .OR. ( ILQ .AND. LDQ.LT.N ) ) THEN
+ INFO = -14
+ ELSE IF( LDZ.LT.1 .OR. ( ILZ .AND. LDZ.LT.N ) ) THEN
+ INFO = -16
+ ELSE IF( LWORK.LT.MAX( 1, N ) .AND. .NOT.LQUERY ) THEN
+ INFO = -18
+ END IF
+ IF( INFO.NE.0 ) THEN
+ CALL XERBLA( 'ZHGEQZ', -INFO )
+ RETURN
+ ELSE IF( LQUERY ) THEN
+ RETURN
+ END IF
+*
+* Quick return if possible
+*
+* WORK( 1 ) = CMPLX( 1 )
+ IF( N.LE.0 ) THEN
+ WORK( 1 ) = DCMPLX( 1 )
+ RETURN
+ END IF
+*
+* Initialize Q and Z
+*
+ IF( ICOMPQ.EQ.3 )
+ $ CALL ZLASET( 'Full', N, N, CZERO, CONE, Q, LDQ )
+ IF( ICOMPZ.EQ.3 )
+ $ CALL ZLASET( 'Full', N, N, CZERO, CONE, Z, LDZ )
+*
+* Machine Constants
+*
+ IN = IHI + 1 - ILO
+ SAFMIN = DLAMCH( 'S' )
+ ULP = DLAMCH( 'E' )*DLAMCH( 'B' )
+ ANORM = ZLANHS( 'F', IN, H( ILO, ILO ), LDH, RWORK )
+ BNORM = ZLANHS( 'F', IN, T( ILO, ILO ), LDT, RWORK )
+ ATOL = MAX( SAFMIN, ULP*ANORM )
+ BTOL = MAX( SAFMIN, ULP*BNORM )
+ ASCALE = ONE / MAX( SAFMIN, ANORM )
+ BSCALE = ONE / MAX( SAFMIN, BNORM )
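+*
+* ATOL and BTOL are the deflation thresholds: a subdiagonal entry
+* of H ( resp. a diagonal entry of T ) whose magnitude is no
+* larger than ATOL ( resp. BTOL ) is treated as zero. ASCALE and
+* BSCALE bring H and T to roughly unit norm in the shift
+* computation and the deflation tests.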
+*
+*
+* Set Eigenvalues IHI+1:N
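+*
+* Each T(J,J) is made real and non-negative by scaling column J
+* by the unit-modulus factor SIGNBC = conjg( T(J,J) )/|T(J,J)|
+* ( full columns of H and T when JOB = 'S', just the diagonal
+* entries otherwise, and column J of Z if it is accumulated );
+* ALPHA(J) and BETA(J) are then read off the diagonals.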
+*
+ DO 10 J = IHI + 1, N
+ ABSB = ABS( T( J, J ) )
+ IF( ABSB.GT.SAFMIN ) THEN
+ SIGNBC = DCONJG( T( J, J ) / ABSB )
+ T( J, J ) = ABSB
+ IF( ILSCHR ) THEN
+ CALL ZSCAL( J-1, SIGNBC, T( 1, J ), 1 )
+ CALL ZSCAL( J, SIGNBC, H( 1, J ), 1 )
+ ELSE
+ H( J, J ) = H( J, J )*SIGNBC
+ END IF
+ IF( ILZ )
+ $ CALL ZSCAL( N, SIGNBC, Z( 1, J ), 1 )
+ ELSE
+ T( J, J ) = CZERO
+ END IF
+ ALPHA( J ) = H( J, J )
+ BETA( J ) = T( J, J )
+ 10 CONTINUE
+*
+* If IHI < ILO, skip QZ steps
+*
+ IF( IHI.LT.ILO )
+ $ GO TO 190
+*
+* MAIN QZ ITERATION LOOP
+*
+* Initialize dynamic indices
+*
+* Eigenvalues ILAST+1:N have been found.
+* Column operations modify rows IFRSTM:whatever
+* Row operations modify columns whatever:ILASTM
+*
+* If only eigenvalues are being computed, then
+* IFRSTM is the row at which the last split above row ILAST occurred;
+* this is always at least ILO.
+* IITER counts iterations since the last eigenvalue was found,
+* to tell when to use an extraordinary shift.
+* MAXIT is the maximum number of QZ sweeps allowed.
+*
+ ILAST = IHI
+ IF( ILSCHR ) THEN
+ IFRSTM = 1
+ ILASTM = N
+ ELSE
+ IFRSTM = ILO
+ ILASTM = IHI
+ END IF
+ IITER = 0
+ ESHIFT = CZERO
+ MAXIT = 30*( IHI-ILO+1 )
+*
+ DO 170 JITER = 1, MAXIT
+*
+* Check for too many iterations.
+*
+ IF( JITER.GT.MAXIT )
+ $ GO TO 180
+*
+* Split the matrix if possible.
+*
+* Two tests:
+* 1: H(j,j-1)=0 or j=ILO
+* 2: T(j,j)=0
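+*
+* If test 1 holds, rows and columns j:ILAST decouple from the
+* rest of the pencil and can be treated separately. If test 2
+* holds, T is singular there and an infinite eigenvalue
+* ( BETA = 0 ) can be deflated once the zero has been chased to
+* T(ILAST,ILAST).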
+*
+* Special case: j=ILAST
+*
+ IF( ILAST.EQ.ILO ) THEN
+ GO TO 60
+ ELSE
+ IF( ABS1( H( ILAST, ILAST-1 ) ).LE.ATOL ) THEN
+ H( ILAST, ILAST-1 ) = CZERO
+ GO TO 60
+ END IF
+ END IF
+*
+ IF( ABS( T( ILAST, ILAST ) ).LE.BTOL ) THEN
+ T( ILAST, ILAST ) = CZERO
+ GO TO 50
+ END IF
+*
+* General case: j<ILAST
+*
+ DO 40 J = ILAST - 1, ILO, -1
+*
+* Test 1: for H(j,j-1)=0 or j=ILO
+*
+ IF( J.EQ.ILO ) THEN
+ ILAZRO = .TRUE.
+ ELSE
+ IF( ABS1( H( J, J-1 ) ).LE.ATOL ) THEN
+ H( J, J-1 ) = CZERO
+ ILAZRO = .TRUE.
+ ELSE
+ ILAZRO = .FALSE.
+ END IF
+ END IF
+*
+* Test 2: for T(j,j)=0
+*
+ IF( ABS( T( J, J ) ).LT.BTOL ) THEN
+ T( J, J ) = CZERO
+*
+* Test 1a: Check for 2 consecutive small subdiagonals in A
+*
+ ILAZR2 = .FALSE.
+ IF( .NOT.ILAZRO ) THEN
+ IF( ABS1( H( J, J-1 ) )*( ASCALE*ABS1( H( J+1,
+ $ J ) ) ).LE.ABS1( H( J, J ) )*( ASCALE*ATOL ) )
+ $ ILAZR2 = .TRUE.
+ END IF
+*
+* If both tests pass (1 & 2), i.e., the leading diagonal
+* element of B in the block is zero, split a 1x1 block off
+* at the top (i.e., at the J-th row/column). The leading
+* diagonal element of the remainder can also be zero, so
+* this may have to be done repeatedly.
+*
+ IF( ILAZRO .OR. ILAZR2 ) THEN
+ DO 20 JCH = J, ILAST - 1
+ CTEMP = H( JCH, JCH )
+ CALL ZLARTG( CTEMP, H( JCH+1, JCH ), C, S,
+ $ H( JCH, JCH ) )
+ H( JCH+1, JCH ) = CZERO
+ CALL ZROT( ILASTM-JCH, H( JCH, JCH+1 ), LDH,
+ $ H( JCH+1, JCH+1 ), LDH, C, S )
+ CALL ZROT( ILASTM-JCH, T( JCH, JCH+1 ), LDT,
+ $ T( JCH+1, JCH+1 ), LDT, C, S )
+ IF( ILQ )
+ $ CALL ZROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1,
+ $ C, DCONJG( S ) )
+ IF( ILAZR2 )
+ $ H( JCH, JCH-1 ) = H( JCH, JCH-1 )*C
+ ILAZR2 = .FALSE.
+ IF( ABS1( T( JCH+1, JCH+1 ) ).GE.BTOL ) THEN
+ IF( JCH+1.GE.ILAST ) THEN
+ GO TO 60
+ ELSE
+ IFIRST = JCH + 1
+ GO TO 70
+ END IF
+ END IF
+ T( JCH+1, JCH+1 ) = CZERO
+ 20 CONTINUE
+ GO TO 50
+ ELSE
+*
+* Only test 2 passed -- chase the zero to T(ILAST,ILAST)
+* Then process as in the case T(ILAST,ILAST)=0
+*
+ DO 30 JCH = J, ILAST - 1
+ CTEMP = T( JCH, JCH+1 )
+ CALL ZLARTG( CTEMP, T( JCH+1, JCH+1 ), C, S,
+ $ T( JCH, JCH+1 ) )
+ T( JCH+1, JCH+1 ) = CZERO
+ IF( JCH.LT.ILASTM-1 )
+ $ CALL ZROT( ILASTM-JCH-1, T( JCH, JCH+2 ), LDT,
+ $ T( JCH+1, JCH+2 ), LDT, C, S )
+ CALL ZROT( ILASTM-JCH+2, H( JCH, JCH-1 ), LDH,
+ $ H( JCH+1, JCH-1 ), LDH, C, S )
+ IF( ILQ )
+ $ CALL ZROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1,
+ $ C, DCONJG( S ) )
+ CTEMP = H( JCH+1, JCH )
+ CALL ZLARTG( CTEMP, H( JCH+1, JCH-1 ), C, S,
+ $ H( JCH+1, JCH ) )
+ H( JCH+1, JCH-1 ) = CZERO
+ CALL ZROT( JCH+1-IFRSTM, H( IFRSTM, JCH ), 1,
+ $ H( IFRSTM, JCH-1 ), 1, C, S )
+ CALL ZROT( JCH-IFRSTM, T( IFRSTM, JCH ), 1,
+ $ T( IFRSTM, JCH-1 ), 1, C, S )
+ IF( ILZ )
+ $ CALL ZROT( N, Z( 1, JCH ), 1, Z( 1, JCH-1 ), 1,
+ $ C, S )
+ 30 CONTINUE
+ GO TO 50
+ END IF
+ ELSE IF( ILAZRO ) THEN
+*
+* Only test 1 passed -- work on J:ILAST
+*
+ IFIRST = J
+ GO TO 70
+ END IF
+*
+* Neither test passed -- try next J
+*
+ 40 CONTINUE
+*
+* (Drop-through is "impossible")
+*
+ INFO = 2*N + 1
+ GO TO 210
+*
+* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a
+* 1x1 block.
+*
+ 50 CONTINUE
+ CTEMP = H( ILAST, ILAST )
+ CALL ZLARTG( CTEMP, H( ILAST, ILAST-1 ), C, S,
+ $ H( ILAST, ILAST ) )
+ H( ILAST, ILAST-1 ) = CZERO
+ CALL ZROT( ILAST-IFRSTM, H( IFRSTM, ILAST ), 1,
+ $ H( IFRSTM, ILAST-1 ), 1, C, S )
+ CALL ZROT( ILAST-IFRSTM, T( IFRSTM, ILAST ), 1,
+ $ T( IFRSTM, ILAST-1 ), 1, C, S )
+ IF( ILZ )
+ $ CALL ZROT( N, Z( 1, ILAST ), 1, Z( 1, ILAST-1 ), 1, C, S )
+*
+* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA
+*
+ 60 CONTINUE
+ ABSB = ABS( T( ILAST, ILAST ) )
+ IF( ABSB.GT.SAFMIN ) THEN
+ SIGNBC = DCONJG( T( ILAST, ILAST ) / ABSB )
+ T( ILAST, ILAST ) = ABSB
+ IF( ILSCHR ) THEN
+ CALL ZSCAL( ILAST-IFRSTM, SIGNBC, T( IFRSTM, ILAST ), 1 )
+ CALL ZSCAL( ILAST+1-IFRSTM, SIGNBC, H( IFRSTM, ILAST ),
+ $ 1 )
+ ELSE
+ H( ILAST, ILAST ) = H( ILAST, ILAST )*SIGNBC
+ END IF
+ IF( ILZ )
+ $ CALL ZSCAL( N, SIGNBC, Z( 1, ILAST ), 1 )
+ ELSE
+ T( ILAST, ILAST ) = CZERO
+ END IF
+ ALPHA( ILAST ) = H( ILAST, ILAST )
+ BETA( ILAST ) = T( ILAST, ILAST )
+*
+* Go to next block -- exit if finished.
+*
+ ILAST = ILAST - 1
+ IF( ILAST.LT.ILO )
+ $ GO TO 190
+*
+* Reset counters
+*
+ IITER = 0
+ ESHIFT = CZERO
+ IF( .NOT.ILSCHR ) THEN
+ ILASTM = ILAST
+ IF( IFRSTM.GT.ILAST )
+ $ IFRSTM = ILO
+ END IF
+ GO TO 160
+*
+* QZ step
+*
+* This iteration only involves rows/columns IFIRST:ILAST. We
+* assume IFIRST < ILAST, and that the diagonal of B is non-zero.
+*
+ 70 CONTINUE
+ IITER = IITER + 1
+ IF( .NOT.ILSCHR ) THEN
+ IFRSTM = IFIRST
+ END IF
+*
+* Compute the Shift.
+*
+* At this point, IFIRST < ILAST, and the diagonal elements of
+* T(IFIRST:ILAST,IFIRST:ILAST) are larger than BTOL (in
+* magnitude)
+*
+ IF( ( IITER / 10 )*10.NE.IITER ) THEN
+*
+* The Wilkinson shift (AEP p.512), i.e., the eigenvalue of
+* the bottom-right 2x2 block of A*inv(B) which is nearest to
+* the bottom-right element.
+*
+* We factor B as U*D, where U has unit diagonals, and
+* compute (A*inv(D))*inv(U).
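+*
+* The bottom-right 2x2 block of (A*inv(D))*inv(U) is
+* [ AD11, AD12-U12*AD11; AD21, ABI22 ] with ABI22 = AD22-U12*AD21,
+* whose eigenvalues are T1 +/- SQRT( T1**2 - DET ) with
+* T1 = ( AD11+ABI22 )/2 and DET = AD11*AD22 - AD12*AD21; the root
+* nearer to ABI22 is used as the shift.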
+*
+ U12 = ( BSCALE*T( ILAST-1, ILAST ) ) /
+ $ ( BSCALE*T( ILAST, ILAST ) )
+ AD11 = ( ASCALE*H( ILAST-1, ILAST-1 ) ) /
+ $ ( BSCALE*T( ILAST-1, ILAST-1 ) )
+ AD21 = ( ASCALE*H( ILAST, ILAST-1 ) ) /
+ $ ( BSCALE*T( ILAST-1, ILAST-1 ) )
+ AD12 = ( ASCALE*H( ILAST-1, ILAST ) ) /
+ $ ( BSCALE*T( ILAST, ILAST ) )
+ AD22 = ( ASCALE*H( ILAST, ILAST ) ) /
+ $ ( BSCALE*T( ILAST, ILAST ) )
+ ABI22 = AD22 - U12*AD21
+*
+ T1 = HALF*( AD11+ABI22 )
+ RTDISC = SQRT( T1**2+AD12*AD21-AD11*AD22 )
+ TEMP = DBLE( T1-ABI22 )*DBLE( RTDISC ) +
+ $ DIMAG( T1-ABI22 )*DIMAG( RTDISC )
+ IF( TEMP.LE.ZERO ) THEN
+ SHIFT = T1 + RTDISC
+ ELSE
+ SHIFT = T1 - RTDISC
+ END IF
+ ELSE
+*
+* Exceptional shift. Chosen for no particularly good reason.
+*
+ ESHIFT = ESHIFT + DCONJG( ( ASCALE*H( ILAST-1, ILAST ) ) /
+ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) )
+ SHIFT = ESHIFT
+ END IF
+*
+* Now check for two consecutive small subdiagonals.
+*
+ DO 80 J = ILAST - 1, IFIRST + 1, -1
+ ISTART = J
+ CTEMP = ASCALE*H( J, J ) - SHIFT*( BSCALE*T( J, J ) )
+ TEMP = ABS1( CTEMP )
+ TEMP2 = ASCALE*ABS1( H( J+1, J ) )
+ TEMPR = MAX( TEMP, TEMP2 )
+ IF( TEMPR.LT.ONE .AND. TEMPR.NE.ZERO ) THEN
+ TEMP = TEMP / TEMPR
+ TEMP2 = TEMP2 / TEMPR
+ END IF
+ IF( ABS1( H( J, J-1 ) )*TEMP2.LE.TEMP*ATOL )
+ $ GO TO 90
+ 80 CONTINUE
+*
+ ISTART = IFIRST
+ CTEMP = ASCALE*H( IFIRST, IFIRST ) -
+ $ SHIFT*( BSCALE*T( IFIRST, IFIRST ) )
+ 90 CONTINUE
+*
+* Do an implicit-shift QZ sweep.
+*
+* Initial Q
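+*
+* The first rotation is chosen from the leading column of
+* ASCALE*H - SHIFT*( BSCALE*T ) restricted to rows
+* ISTART:ISTART+1 ( CTEMP holds its first entry ), so that the
+* second entry is annihilated; the remaining rotations of the
+* sweep chase the resulting bulge down to row ILAST.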
+*
+ CTEMP2 = ASCALE*H( ISTART+1, ISTART )
+ CALL ZLARTG( CTEMP, CTEMP2, C, S, CTEMP3 )
+*
+* Sweep
+*
+ DO 150 J = ISTART, ILAST - 1
+ IF( J.GT.ISTART ) THEN
+ CTEMP = H( J, J-1 )
+ CALL ZLARTG( CTEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) )
+ H( J+1, J-1 ) = CZERO
+ END IF
+*
+ DO 100 JC = J, ILASTM
+ CTEMP = C*H( J, JC ) + S*H( J+1, JC )
+ H( J+1, JC ) = -DCONJG( S )*H( J, JC ) + C*H( J+1, JC )
+ H( J, JC ) = CTEMP
+ CTEMP2 = C*T( J, JC ) + S*T( J+1, JC )
+ T( J+1, JC ) = -DCONJG( S )*T( J, JC ) + C*T( J+1, JC )
+ T( J, JC ) = CTEMP2
+ 100 CONTINUE
+ IF( ILQ ) THEN
+ DO 110 JR = 1, N
+ CTEMP = C*Q( JR, J ) + DCONJG( S )*Q( JR, J+1 )
+ Q( JR, J+1 ) = -S*Q( JR, J ) + C*Q( JR, J+1 )
+ Q( JR, J ) = CTEMP
+ 110 CONTINUE
+ END IF
+*
+ CTEMP = T( J+1, J+1 )
+ CALL ZLARTG( CTEMP, T( J+1, J ), C, S, T( J+1, J+1 ) )
+ T( J+1, J ) = CZERO
+*
+ DO 120 JR = IFRSTM, MIN( J+2, ILAST )
+ CTEMP = C*H( JR, J+1 ) + S*H( JR, J )
+ H( JR, J ) = -DCONJG( S )*H( JR, J+1 ) + C*H( JR, J )
+ H( JR, J+1 ) = CTEMP
+ 120 CONTINUE
+ DO 130 JR = IFRSTM, J
+ CTEMP = C*T( JR, J+1 ) + S*T( JR, J )
+ T( JR, J ) = -DCONJG( S )*T( JR, J+1 ) + C*T( JR, J )
+ T( JR, J+1 ) = CTEMP
+ 130 CONTINUE
+ IF( ILZ ) THEN
+ DO 140 JR = 1, N
+ CTEMP = C*Z( JR, J+1 ) + S*Z( JR, J )
+ Z( JR, J ) = -DCONJG( S )*Z( JR, J+1 ) + C*Z( JR, J )
+ Z( JR, J+1 ) = CTEMP
+ 140 CONTINUE
+ END IF
+ 150 CONTINUE
+*
+ 160 CONTINUE
+*
+ 170 CONTINUE
+*
+* Drop-through = non-convergence
+*
+ 180 CONTINUE
+ INFO = ILAST
+ GO TO 210
+*
+* Successful completion of all QZ steps
+*
+ 190 CONTINUE
+*
+* Set Eigenvalues 1:ILO-1
+*
+ DO 200 J = 1, ILO - 1
+ ABSB = ABS( T( J, J ) )
+ IF( ABSB.GT.SAFMIN ) THEN
+ SIGNBC = DCONJG( T( J, J ) / ABSB )
+ T( J, J ) = ABSB
+ IF( ILSCHR ) THEN
+ CALL ZSCAL( J-1, SIGNBC, T( 1, J ), 1 )
+ CALL ZSCAL( J, SIGNBC, H( 1, J ), 1 )
+ ELSE
+ H( J, J ) = H( J, J )*SIGNBC
+ END IF
+ IF( ILZ )
+ $ CALL ZSCAL( N, SIGNBC, Z( 1, J ), 1 )
+ ELSE
+ T( J, J ) = CZERO
+ END IF
+ ALPHA( J ) = H( J, J )
+ BETA( J ) = T( J, J )
+ 200 CONTINUE
+*
+* Normal Termination
+*
+ INFO = 0
+*
+* Exit (other than argument error) -- return optimal workspace size
+*
+ 210 CONTINUE
+ WORK( 1 ) = DCMPLX( N )
+ RETURN
+*
+* End of ZHGEQZ
+*
+ END