diff --git a/quantecon/_compute_fp.py b/quantecon/_compute_fp.py
index aec0727ec..ca37cd7d4 100644
--- a/quantecon/_compute_fp.py
+++ b/quantecon/_compute_fp.py
@@ -261,11 +261,11 @@ def _compute_fixed_point_ig(T, v, max_iter, verbose, print_skip, is_approx_fp,
         _, rho = _get_mixed_actions(tableaux_curr, bases_curr)
         if Y.ndim <= 2:
-            x_new = rho.dot(Y[:m])
+            x_new = rho @ Y[:m]
         else:
             shape_Y = Y.shape
             Y_2d = Y.reshape(shape_Y[0], np.prod(shape_Y[1:]))
-            x_new = rho.dot(Y_2d[:m]).reshape(shape_Y[1:])
+            x_new = (rho @ Y_2d[:m]).reshape(shape_Y[1:])

         if verbose == 2:
             error = np.max(np.abs(y_new - x_new))
diff --git a/quantecon/_dle.py b/quantecon/_dle.py
index d16bfc61d..0f70f0e7a 100644
--- a/quantecon/_dle.py
+++ b/quantecon/_dle.py
@@ -76,11 +76,11 @@ def __init__(self, information, technology, preferences):

         uc = np.hstack((np.eye(self.nc), np.zeros((self.nc, self.ng))))
         ug = np.hstack((np.zeros((self.ng, self.nc)), np.eye(self.ng)))
         phiin = np.linalg.inv(np.hstack((self.phic, self.phig)))
-        phiinc = uc.dot(phiin)
-        b11 = - self.thetah.dot(phiinc).dot(self.phii)
-        a1 = self.thetah.dot(phiinc).dot(self.gamma)
-        a12 = np.vstack((self.thetah.dot(phiinc).dot(
-            self.ud), np.zeros((self.nk, self.nz))))
+        phiinc = uc @ phiin
+        b11 = - self.thetah @ phiinc @ self.phii
+        a1 = self.thetah @ phiinc @ self.gamma
+        a12 = np.vstack((self.thetah @ phiinc @ self.ud,
+                         np.zeros((self.nk, self.nz))))

         # === Creation of the A Matrix for the state transition of the LQ problem === #
@@ -100,11 +100,11 @@ def __init__(self, information, technology, preferences):

         # === Define R,W and Q for the payoff function of the LQ problem === #

-        self.H = np.hstack((self.llambda, self.pih.dot(uc).dot(phiin).dot(self.gamma), self.pih.dot(
-            uc).dot(phiin).dot(self.ud) - self.ub, -self.pih.dot(uc).dot(phiin).dot(self.phii)))
-        self.G = ug.dot(phiin).dot(
-            np.hstack((np.zeros((self.nd, self.nh)), self.gamma, self.ud, -self.phii)))
-        self.S = (self.G.T.dot(self.G) + self.H.T.dot(self.H)) / 2
+        self.H = np.hstack((self.llambda, self.pih @ uc @ phiin @ self.gamma, self.pih @
+                            uc @ phiin @ self.ud - self.ub, -self.pih @ uc @ phiin @ self.phii))
+        self.G = ug @ phiin @ np.hstack((np.zeros((self.nd, self.nh)),
+                                         self.gamma, self.ud, -self.phii))
+        self.S = (self.G.T @ self.G + self.H.T @ self.H) / 2

         self.nx = self.nh + self.nk + self.nz
         self.n = self.ni + self.nh + self.nk + self.nz
@@ -122,7 +122,7 @@ def __init__(self, information, technology, preferences):

         # === Construct output matrices for our economy using the solution to the LQ problem === #

-        self.A0 = self.A - self.B.dot(self.F)
+        self.A0 = self.A - self.B @ self.F

         self.Sh = self.A0[0:self.nh, 0:self.nx]
         self.Sk = self.A0[self.nh:self.nh + self.nk, 0:self.nx]
@@ -131,12 +131,12 @@ def __init__(self, information, technology, preferences):
         self.Si = -self.F
         self.Sd = np.hstack((np.zeros((self.nd, self.nh + self.nk)), self.ud))
         self.Sb = np.hstack((np.zeros((self.nb, self.nh + self.nk)), self.ub))
-        self.Sc = uc.dot(phiin).dot(-self.phii.dot(self.Si) +
-                                    self.gamma.dot(self.Sk1) + self.Sd)
-        self.Sg = ug.dot(phiin).dot(-self.phii.dot(self.Si) +
-                                    self.gamma.dot(self.Sk1) + self.Sd)
-        self.Ss = self.llambda.dot(np.hstack((np.eye(self.nh), np.zeros(
-            (self.nh, self.nk + self.nz))))) + self.pih.dot(self.Sc)
+        self.Sc = uc @ phiin @ (-self.phii @ self.Si +
+                                self.gamma @ self.Sk1 + self.Sd)
+        self.Sg = ug @ phiin @ (-self.phii @ self.Si +
+                                self.gamma @ self.Sk1 + self.Sd)
+        self.Ss = self.llambda @ np.hstack((np.eye(self.nh), np.zeros(
+            (self.nh, self.nk + self.nz)))) + self.pih @ self.Sc

         # === Calculate eigenvalues of A0 === #
         self.A110 = self.A0[0:self.nh + self.nk, 0:self.nh + self.nk]
@@ -146,14 +146,14 @@ def __init__(self, information, technology, preferences):

         # === Construct matrices for Lagrange Multipliers === #
         self.Mk = -2 * self.beta.item() * (np.hstack((np.zeros((self.nk, self.nh)), np.eye(
-            self.nk), np.zeros((self.nk, self.nz))))).dot(self.P).dot(self.A0)
+            self.nk), np.zeros((self.nk, self.nz))))) @ self.P @ self.A0
         self.Mh = -2 * self.beta.item() * (np.hstack((np.eye(self.nh), np.zeros(
-            (self.nh, self.nk)), np.zeros((self.nh, self.nz))))).dot(self.P).dot(self.A0)
+            (self.nh, self.nk)), np.zeros((self.nh, self.nz))))) @ self.P @ self.A0
         self.Ms = -(self.Sb - self.Ss)
-        self.Md = -(np.linalg.inv(np.vstack((self.phic.T, self.phig.T))).dot(
-            np.vstack((self.thetah.T.dot(self.Mh) + self.pih.T.dot(self.Ms), -self.Sg))))
-        self.Mc = -(self.thetah.T.dot(self.Mh) + self.pih.T.dot(self.Ms))
-        self.Mi = -(self.thetak.T.dot(self.Mk))
+        self.Md = -(np.linalg.inv(np.vstack((self.phic.T, self.phig.T))) @
+                    np.vstack((self.thetah.T @ self.Mh + self.pih.T @ self.Ms, -self.Sg)))
+        self.Mc = -(self.thetah.T @ self.Mh + self.pih.T @ self.Ms)
+        self.Mi = -(self.thetak.T @ self.Mk)

     def compute_steadystate(self, nnc=2):
         """
@@ -168,13 +168,13 @@ def compute_steadystate(self, nnc=2):
         zx = np.eye(self.A0.shape[0])-self.A0
         self.zz = nullspace(zx)
         self.zz /= self.zz[nnc]
-        self.css = self.Sc.dot(self.zz)
-        self.sss = self.Ss.dot(self.zz)
-        self.iss = self.Si.dot(self.zz)
-        self.dss = self.Sd.dot(self.zz)
-        self.bss = self.Sb.dot(self.zz)
-        self.kss = self.Sk.dot(self.zz)
-        self.hss = self.Sh.dot(self.zz)
+        self.css = self.Sc @ self.zz
+        self.sss = self.Ss @ self.zz
+        self.iss = self.Si @ self.zz
+        self.dss = self.Sd @ self.zz
+        self.bss = self.Sb @ self.zz
+        self.kss = self.Sk @ self.zz
+        self.hss = self.Sh @ self.zz

     def compute_sequence(self, x0, ts_length=None, Pay=None):
         """
@@ -195,14 +195,14 @@ def compute_sequence(self, x0, ts_length=None, Pay=None):
         lq = LQ(self.Q, self.R, self.A, self.B, self.C, N=self.W,
                 beta=self.beta)
         xp, up, wp = lq.compute_sequence(x0, ts_length)
-        self.h = self.Sh.dot(xp)
-        self.k = self.Sk.dot(xp)
-        self.i = self.Si.dot(xp)
-        self.b = self.Sb.dot(xp)
-        self.d = self.Sd.dot(xp)
-        self.c = self.Sc.dot(xp)
-        self.g = self.Sg.dot(xp)
-        self.s = self.Ss.dot(xp)
+        self.h = self.Sh @ xp
+        self.k = self.Sk @ xp
+        self.i = self.Si @ xp
+        self.b = self.Sb @ xp
+        self.d = self.Sd @ xp
+        self.c = self.Sc @ xp
+        self.g = self.Sg @ xp
+        self.s = self.Ss @ xp

         # === Value of J-period risk-free bonds === #
         # === See p.145: Equation (7.11.2) === #
@@ -212,12 +212,12 @@ def compute_sequence(self, x0, ts_length=None, Pay=None):
         self.R2_Price = np.empty((ts_length + 1, 1))
         self.R5_Price = np.empty((ts_length + 1, 1))
         for i in range(ts_length + 1):
-            self.R1_Price[i, 0] = self.beta * e1.dot(self.Mc).dot(np.linalg.matrix_power(
-                self.A0, 1)).dot(xp[:, i]) / e1.dot(self.Mc).dot(xp[:, i])
-            self.R2_Price[i, 0] = self.beta**2 * e1.dot(self.Mc).dot(
-                np.linalg.matrix_power(self.A0, 2)).dot(xp[:, i]) / e1.dot(self.Mc).dot(xp[:, i])
-            self.R5_Price[i, 0] = self.beta**5 * e1.dot(self.Mc).dot(
-                np.linalg.matrix_power(self.A0, 5)).dot(xp[:, i]) / e1.dot(self.Mc).dot(xp[:, i])
+            self.R1_Price[i, 0] = (self.beta * e1 @ self.Mc @ np.linalg.matrix_power(
+                self.A0, 1) @ xp[:, i]) / (e1 @ self.Mc @ xp[:, i])
+            self.R2_Price[i, 0] = (self.beta**2 * e1 @ self.Mc @ np.linalg.matrix_power(
+                self.A0, 2) @ xp[:, i]) / (e1 @ self.Mc @ xp[:, i])
+            self.R5_Price[i, 0] = (self.beta**5 * e1 @ self.Mc @ np.linalg.matrix_power(
+                self.A0, 5) @ xp[:, i]) / (e1 @ self.Mc @ xp[:, i])

         # === Gross rates of return on 1-period risk-free bonds === #
         self.R1_Gross = 1 / self.R1_Price
@@ -231,20 +231,20 @@ def compute_sequence(self, x0, ts_length=None, Pay=None):
         # === Value of asset whose payout vector is Pay*xt === #
         # See p.145: Equation (7.11.1)
         if isinstance(Pay, np.ndarray) == True:
-            self.Za = Pay.T.dot(self.Mc)
+            self.Za = Pay.T @ self.Mc
             self.Q = solve_discrete_lyapunov(
                 self.A0.T * self.beta**0.5, self.Za)
             self.q = self.beta / (1 - self.beta) * \
-                np.trace(self.C.T.dot(self.Q).dot(self.C))
+                np.trace(self.C.T @ self.Q @ self.C)
             self.Pay_Price = np.empty((ts_length + 1, 1))
             self.Pay_Gross = np.empty((ts_length + 1, 1))
             self.Pay_Gross[0, 0] = np.nan
             for i in range(ts_length + 1):
-                self.Pay_Price[i, 0] = (xp[:, i].T.dot(self.Q).dot(
-                    xp[:, i]) + self.q) / e1.dot(self.Mc).dot(xp[:, i])
+                self.Pay_Price[i, 0] = (xp[:, i].T @ self.Q @
+                                        xp[:, i] + self.q) / (e1 @ self.Mc @ xp[:, i])
             for i in range(ts_length):
                 self.Pay_Gross[i + 1, 0] = self.Pay_Price[i + 1,
-                                                          0] / (self.Pay_Price[i, 0] - Pay.dot(xp[:, i]))
+                                                          0] / (self.Pay_Price[i, 0] - Pay @ (xp[:, i]))
         return

     def irf(self, ts_length=100, shock=None):
@@ -276,22 +276,22 @@ def irf(self, ts_length=100, shock=None):
         self.b_irf = np.empty((ts_length, self.nb))

         for i in range(ts_length):
-            self.c_irf[i, :] = self.Sc.dot(
-                np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
-            self.s_irf[i, :] = self.Ss.dot(
-                np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
-            self.i_irf[i, :] = self.Si.dot(
-                np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
-            self.k_irf[i, :] = self.Sk.dot(
-                np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
-            self.h_irf[i, :] = self.Sh.dot(
-                np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
-            self.g_irf[i, :] = self.Sg.dot(
-                np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
-            self.d_irf[i, :] = self.Sd.dot(
-                np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
-            self.b_irf[i, :] = self.Sb.dot(
-                np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
+            self.c_irf[i, :] = (self.Sc @
+                                np.linalg.matrix_power(self.A0, i) @ self.C @ shock).T
+            self.s_irf[i, :] = (self.Ss @
+                                np.linalg.matrix_power(self.A0, i) @ self.C @ shock).T
+            self.i_irf[i, :] = (self.Si @
+                                np.linalg.matrix_power(self.A0, i) @ self.C @ shock).T
+            self.k_irf[i, :] = (self.Sk @
+                                np.linalg.matrix_power(self.A0, i) @ self.C @ shock).T
+            self.h_irf[i, :] = (self.Sh @
+                                np.linalg.matrix_power(self.A0, i) @ self.C @ shock).T
+            self.g_irf[i, :] = (self.Sg @
+                                np.linalg.matrix_power(self.A0, i) @ self.C @ shock).T
+            self.d_irf[i, :] = (self.Sd @
+                                np.linalg.matrix_power(self.A0, i) @ self.C @ shock).T
+            self.b_irf[i, :] = (self.Sb @
+                                np.linalg.matrix_power(self.A0, i) @ self.C @ shock).T

         return

@@ -305,13 +305,13 @@ def canonical(self):
         Ac2 = np.hstack((np.zeros((self.nz, self.nh)), self.a22))
         Ac = np.vstack((Ac1, Ac2))
         Bc = np.vstack((self.thetah, np.zeros((self.nz, self.nc))))
-        Rc1 = np.hstack((self.llambda.T.dot(self.llambda), -
-                         self.llambda.T.dot(self.ub)))
-        Rc2 = np.hstack((-self.ub.T.dot(self.llambda), self.ub.T.dot(self.ub)))
+        Rc1 = np.hstack((self.llambda.T @ self.llambda, -
+                         self.llambda.T @ self.ub))
+        Rc2 = np.hstack((-self.ub.T @ self.llambda, self.ub.T @ self.ub))
         Rc = np.vstack((Rc1, Rc2))
-        Qc = self.pih.T.dot(self.pih)
+        Qc = self.pih.T @ self.pih
         Nc = np.hstack(
-            (self.pih.T.dot(self.llambda), -self.pih.T.dot(self.ub)))
+            (self.pih.T @ self.llambda, -self.pih.T @ self.ub))

         lq_aux = LQ(Qc, Rc, Ac, Bc, N=Nc, beta=self.beta)

@@ -320,9 +320,9 @@ def canonical(self):
         self.F_b = F1[:, 0:self.nh]
         self.F_f = F1[:, self.nh:]

-        self.pihat = np.linalg.cholesky(self.pih.T.dot(
-            self.pih) + self.beta.dot(self.thetah.T).dot(P1[0:self.nh, 0:self.nh]).dot(self.thetah)).T
-        self.llambdahat = self.pihat.dot(self.F_b)
-        self.ubhat = - self.pihat.dot(self.F_f)
+        self.pihat = np.linalg.cholesky(self.pih.T @
+            self.pih + self.beta @ self.thetah.T @ P1[0:self.nh, 0:self.nh] @ self.thetah).T
+        self.llambdahat = self.pihat @ self.F_b
+        self.ubhat = - self.pihat @ self.F_f

         return
diff --git a/quantecon/_kalman.py b/quantecon/_kalman.py
index c6a94c4b9..e7fd3c154 100644
--- a/quantecon/_kalman.py
+++ b/quantecon/_kalman.py
@@ -162,9 +162,9 @@ def whitener_lss(self):
         A, C, G, H = self.ss.A, self.ss.C, self.ss.G, self.ss.H

         Atil = np.vstack([np.hstack([A, np.zeros((n, n)), np.zeros((n, l))]),
-                          np.hstack([np.dot(K, G),
-                                     A-np.dot(K, G),
-                                     np.dot(K, H)]),
+                          np.hstack([K @ G,
+                                     A - K @ G,
+                                     K @ H]),
                           np.zeros((l, 2*n + l))])

         Ctil = np.vstack([np.hstack([C, np.zeros((n, l))]),
@@ -200,16 +200,16 @@ def prior_to_filtered(self, y):
         """
         # === simplify notation === #
         G, H = self.ss.G, self.ss.H
-        R = np.dot(H, H.T)
+        R = H @ H.T

         # === and then update === #
         y = np.atleast_2d(y)
         y.shape = self.ss.k, 1
-        E = np.dot(self.Sigma, G.T)
-        F = np.dot(np.dot(G, self.Sigma), G.T) + R
-        M = np.dot(E, inv(F))
-        self.x_hat = self.x_hat + np.dot(M, (y - np.dot(G, self.x_hat)))
-        self.Sigma = self.Sigma - np.dot(M, np.dot(G, self.Sigma))
+        E = self.Sigma @ G.T
+        F = G @ self.Sigma @ G.T + R
+        M = E @ inv(F)
+        self.x_hat = self.x_hat + M @ (y - G @ self.x_hat)
+        self.Sigma = self.Sigma - M @ G @ self.Sigma

     def filtered_to_forecast(self):
         """
@@ -220,11 +220,11 @@ def filtered_to_forecast(self):
         """
         # === simplify notation === #
         A, C = self.ss.A, self.ss.C
-        Q = np.dot(C, C.T)
+        Q = C @ C.T

         # === and then update === #
-        self.x_hat = np.dot(A, self.x_hat)
-        self.Sigma = np.dot(A, np.dot(self.Sigma, A.T)) + Q
+        self.x_hat = A @ self.x_hat
+        self.Sigma = A @ self.Sigma @ A.T + Q

     def update(self, y):
         """
@@ -265,13 +265,13 @@ def stationary_values(self, method='doubling'):

         # === simplify notation === #
         A, C, G, H = self.ss.A, self.ss.C, self.ss.G, self.ss.H
-        Q, R = np.dot(C, C.T), np.dot(H, H.T)
+        Q, R = C @ C.T, H @ H.T

         # === solve Riccati equation, obtain Kalman gain === #
         Sigma_infinity = solve_discrete_riccati(A.T, G.T, Q, R, method=method)
-        temp1 = np.dot(np.dot(A, Sigma_infinity), G.T)
-        temp2 = inv(np.dot(G, np.dot(Sigma_infinity, G.T)) + R)
-        K_infinity = np.dot(temp1, temp2)
+        temp1 = A @ Sigma_infinity @ G.T
+        temp2 = inv(G @ Sigma_infinity @ G.T + R)
+        K_infinity = temp1 @ temp2

         # == record as attributes and return == #
         self._Sigma_infinity, self._K_infinity = Sigma_infinity, K_infinity
@@ -301,21 +301,21 @@ def stationary_coefficients(self, j, coeff_type='ma'):
             P_mat = A
             P = np.identity(self.ss.n)  # Create a copy
         elif coeff_type == 'var':
-            coeffs.append(np.dot(G, K_infinity))
-            P_mat = A - np.dot(K_infinity, G)
+            coeffs.append(G @ K_infinity)
+            P_mat = A - K_infinity @ G
             P = np.copy(P_mat)  # Create a copy
         else:
             raise ValueError("Unknown coefficient type")

         while i <= j:
-            coeffs.append(np.dot(np.dot(G, P), K_infinity))
-            P = np.dot(P, P_mat)
+            coeffs.append(G @ P @ K_infinity)
+            P = P @ P_mat
             i += 1

         return coeffs

     def stationary_innovation_covar(self):
         # == simplify notation == #
         H, G = self.ss.H, self.ss.G
-        R = np.dot(H, H.T)
+        R = H @ H.T
         Sigma_infinity = self.Sigma_infinity

-        return np.dot(G, np.dot(Sigma_infinity, G.T)) + R
+        return G @ Sigma_infinity @ G.T + R
diff --git a/quantecon/_lqcontrol.py b/quantecon/_lqcontrol.py
index 2b9fb3fc3..d48da0d75 100644
--- a/quantecon/_lqcontrol.py
+++ b/quantecon/_lqcontrol.py
@@ -183,17 +183,18 @@ def update_values(self):
         """
         # === Simplify notation === #
         Q, R, A, B, N, C = self.Q, self.R, self.A, self.B, self.N, self.C
-        P, d = self.P, self.d
+        P, d = map(np.atleast_2d, (self.P, self.d))
+
         # == Some useful matrices == #
-        S1 = Q + self.beta * np.dot(B.T, np.dot(P, B))
-        S2 = self.beta * np.dot(B.T, np.dot(P, A)) + N
-        S3 = self.beta * np.dot(A.T, np.dot(P, A))
+        S1 = Q + self.beta * B.T @ P @ B
+        S2 = self.beta * B.T @ P @ A + N
+        S3 = self.beta * A.T @ P @ A
         # == Compute F as (Q + B'PB)^{-1} (beta B'PA + N) == #
         self.F = solve(S1, S2)
         # === Shift P back in time one step == #
-        new_P = R - np.dot(S2.T, self.F) + S3
+        new_P = R - S2.T @ self.F + S3
         # == Recalling that trace(AB) = trace(BA) == #
-        new_d = self.beta * (d + np.trace(np.dot(P, np.dot(C, C.T))))
+        new_d = self.beta * (d + np.trace(P @ C @ C.T))

         # == Set new state == #
         self.P, self.d = new_P, new_d
@@ -239,15 +240,15 @@ def stationary_values(self, method='doubling'):
         P = solve_discrete_riccati(A0, B0, R, Q, N, method=method)

         # == Compute F == #
-        S1 = Q + self.beta * np.dot(B.T, np.dot(P, B))
-        S2 = self.beta * np.dot(B.T, np.dot(P, A)) + N
+        S1 = Q + self.beta * B.T @ P @ B
+        S2 = self.beta * B.T @ P @ A + N
         F = solve(S1, S2)

         # == Compute d == #
         if self.beta == 1:
             d = 0
         else:
-            d = self.beta * np.trace(np.dot(P, np.dot(C, C.T))) / (1 - self.beta)
+            d = self.beta * np.trace(P @ C @ C.T) / (1 - self.beta)

         # == Bind states and return values == #
         self.P, self.F, self.d = P, F, d
@@ -314,7 +315,7 @@ def compute_sequence(self, x0, ts_length=None, method='doubling',
         x_path = np.empty((self.n, T+1))
         u_path = np.empty((self.k, T))
         w_path = random_state.standard_normal((self.j, T+1))
-        Cw_path = np.dot(C, w_path)
+        Cw_path = C @ w_path

         # == Compute and record the sequence of policies == #
         policies = []
@@ -326,13 +327,13 @@ def compute_sequence(self, x0, ts_length=None, method='doubling',
         # == Use policy sequence to generate states and controls == #
         F = policies.pop()
         x_path[:, 0] = x0.flatten()
-        u_path[:, 0] = - np.dot(F, x0).flatten()
+        u_path[:, 0] = - (F @ x0).flatten()
         for t in range(1, T):
             F = policies.pop()
-            Ax, Bu = np.dot(A, x_path[:, t-1]), np.dot(B, u_path[:, t-1])
+            Ax, Bu = A @ x_path[:, t-1], B @ u_path[:, t-1]
             x_path[:, t] = Ax + Bu + Cw_path[:, t]
-            u_path[:, t] = - np.dot(F, x_path[:, t])
-        Ax, Bu = np.dot(A, x_path[:, T-1]), np.dot(B, u_path[:, T-1])
+            u_path[:, t] = - F @ x_path[:, t]
+        Ax, Bu = A @ x_path[:, T-1], B @ u_path[:, T-1]
         x_path[:, T] = Ax + Bu + Cw_path[:, T]

         return x_path, u_path, w_path
diff --git a/quantecon/_lqnash.py b/quantecon/_lqnash.py
index 2b2fbbbe0..43da6e7ba 100644
--- a/quantecon/_lqnash.py
+++ b/quantecon/_lqnash.py
@@ -88,6 +88,8 @@ def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2,
     params = map(np.asarray, params)
     A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2 = params

+    S1, S2, W1, W2, M1, M2 = map(np.atleast_2d, (S1, S2, W1, W2, M1, M2))
+
     # == Multiply A, B1, B2 by sqrt(beta) to enforce discounting == #
     A, B1, B2 = [np.sqrt(beta) * x for x in (A, B1, B2)]

@@ -118,28 +120,27 @@ def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2,
         F10 = F1
         F20 = F2

-        G2 = solve(np.dot(B2.T, P2.dot(B2))+Q2, v2)
-        G1 = solve(np.dot(B1.T, P1.dot(B1))+Q1, v1)
-        H2 = np.dot(G2, B2.T.dot(P2))
-        H1 = np.dot(G1, B1.T.dot(P1))
+        G2 = solve(B2.T @ P2 @ B2 + Q2, v2)
+        G1 = solve(B1.T @ P1 @ B1 + Q1, v1)
+        H2 = G2 @ B2.T @ P2
+        H1 = G1 @ B1.T @ P1

         # break up the computation of F1, F2
-        F1_left = v1 - np.dot(H1.dot(B2)+G1.dot(M1.T),
-                              H2.dot(B1)+G2.dot(M2.T))
-        F1_right = H1.dot(A)+G1.dot(W1.T) - np.dot(H1.dot(B2)+G1.dot(M1.T),
-                                                   H2.dot(A)+G2.dot(W2.T))
+        F1_left = v1 - (H1 @ B2 + G1 @ M1.T) @ (H2 @ B1 + G2 @ M2.T)
+        F1_right = H1 @ A + G1 @ W1.T - (H1 @ B2 + G1 @ M1.T) @ \
+            (H2 @ A + G2 @ W2.T)
         F1 = solve(F1_left, F1_right)
-        F2 = H2.dot(A)+G2.dot(W2.T) - np.dot(H2.dot(B1)+G2.dot(M2.T), F1)
+        F2 = H2 @ A + G2 @ W2.T - (H2 @ B1 + G2 @ M2.T) @ F1

-        Lambda1 = A - B2.dot(F2)
-        Lambda2 = A - B1.dot(F1)
-        Pi1 = R1 + np.dot(F2.T, S1.dot(F2))
-        Pi2 = R2 + np.dot(F1.T, S2.dot(F1))
+        Lambda1 = A - B2 @ F2
+        Lambda2 = A - B1 @ F1
+        Pi1 = R1 + F2.T @ S1 @ F2
+        Pi2 = R2 + F1.T @ S2 @ F1

-        P1 = np.dot(Lambda1.T, P1.dot(Lambda1)) + Pi1 - \
-            np.dot(np.dot(Lambda1.T, P1.dot(B1)) + W1 - F2.T.dot(M1), F1)
-        P2 = np.dot(Lambda2.T, P2.dot(Lambda2)) + Pi2 - \
-            np.dot(np.dot(Lambda2.T, P2.dot(B2)) + W2 - F1.T.dot(M2), F2)
+        P1 = Lambda1.T @ P1 @ Lambda1 + Pi1 - \
+            (Lambda1.T @ P1 @ B1 + W1 - F2.T @ M1) @ F1
+        P2 = Lambda2.T @ P2 @ Lambda2 + Pi2 - \
+            (Lambda2.T @ P2 @ B2 + W2 - F1.T @ M2) @ F2

         dd = np.max(np.abs(F10 - F1)) + np.max(np.abs(F20 - F2))
diff --git a/quantecon/_lss.py b/quantecon/_lss.py
index e3c3794f4..2dcba5908 100644
--- a/quantecon/_lss.py
+++ b/quantecon/_lss.py
@@ -188,15 +188,15 @@ def simulate(self, ts_length=100, random_state=None):
             x0 = random_state.multivariate_normal(self.mu_0.flatten(),
                                                   self.Sigma_0)
         w = random_state.standard_normal((self.m, ts_length-1))
-        v = self.C.dot(w)  # Multiply each w_t by C to get v_t = C w_t
+        v = self.C @ w  # Multiply each w_t by C to get v_t = C w_t

         # == simulate time series == #
         x = simulate_linear_model(self.A, x0, v, ts_length)

         if self.H is not None:
             v = random_state.standard_normal((self.l, ts_length))
-            y = self.G.dot(x) + self.H.dot(v)
+            y = self.G @ x + self.H @ v
         else:
-            y = self.G.dot(x)
+            y = self.G @ x

         return x, y
@@ -236,9 +236,9 @@ def replicate(self, T=10, num_reps=100, random_state=None):
             x[:, j] = x_T[:, -1]

         if self.H is not None:
             v = random_state.standard_normal((self.l, num_reps))
-            y = self.G.dot(x) + self.H.dot(v)
+            y = self.G @ x + self.H @ v
         else:
-            y = self.G.dot(x)
+            y = self.G @ x

         return x, y
@@ -270,17 +270,17 @@ def moment_sequence(self):
         mu_x, Sigma_x = self.mu_0, self.Sigma_0

         while 1:
-            mu_y = G.dot(mu_x)
+            mu_y = G @ mu_x
             if H is None:
-                Sigma_y = G.dot(Sigma_x).dot(G.T)
+                Sigma_y = G @ Sigma_x @ G.T
             else:
-                Sigma_y = G.dot(Sigma_x).dot(G.T) + H.dot(H.T)
+                Sigma_y = G @ Sigma_x @ G.T + H @ H.T

             yield mu_x, mu_y, Sigma_x, Sigma_y

             # == Update moments of x == #
-            mu_x = A.dot(mu_x)
-            Sigma_x = A.dot(Sigma_x).dot(A.T) + C.dot(C.T)
+            mu_x = A @ mu_x
+            Sigma_x = A @ Sigma_x @ A.T + C @ C.T

     def stationary_distributions(self):
         r"""
@@ -368,7 +368,7 @@ def geometric_sums(self, beta, x_t):

         I = np.identity(self.n)
         S_x = solve(I - beta * self.A, x_t)
-        S_y = self.G.dot(S_x)
+        S_y = self.G @ S_x

         return S_x, S_y
@@ -401,12 +401,12 @@ def impulse_response(self, j=5):

         # Create room for coefficients
         xcoef = [C]
-        ycoef = [np.dot(G, C)]
+        ycoef = [G @ C]

         for i in range(j):
-            xcoef.append(np.dot(Apower, C))
-            ycoef.append(np.dot(G, np.dot(Apower, C)))
-            Apower = np.dot(Apower, A)
+            xcoef.append(Apower @ C)
+            ycoef.append(G @ Apower @ C)
+            Apower = Apower @ A

         return xcoef, ycoef
diff --git a/quantecon/_matrix_eqn.py b/quantecon/_matrix_eqn.py
index 09466e268..954ed7b0e 100644
--- a/quantecon/_matrix_eqn.py
+++ b/quantecon/_matrix_eqn.py
@@ -72,8 +72,8 @@ def solve_discrete_lyapunov(A, B, max_it=50, method="doubling"):

         while diff > 1e-15:

-            alpha1 = alpha0.dot(alpha0)
-            gamma1 = gamma0 + np.dot(alpha0.dot(gamma0), alpha0.conjugate().T)
+            alpha1 = alpha0 @ alpha0
+            gamma1 = gamma0 + alpha0 @ gamma0 @ alpha0.conjugate().T

             diff = np.max(np.abs(gamma1 - gamma0))
             alpha0 = alpha1
@@ -171,19 +171,19 @@ def solve_discrete_riccati(A, B, Q, R, N=None, tolerance=1e-10, max_iter=500,
    # == Choose optimal value of gamma in R_hat = R + gamma B'B == #
     current_min = np.inf
     candidates = (0.01, 0.1, 0.25, 0.5, 1.0, 2.0, 10.0, 100.0, 10e5)
-    BB = np.dot(B.T, B)
-    BTA = np.dot(B.T, A)
+    BB = B.T @ B
+    BTA = B.T @ A
     for gamma in candidates:
         Z = R + gamma * BB
         cn = np.linalg.cond(Z)
         if cn * EPS < 1:
-            Q_tilde = - Q + np.dot(N.T, solve(Z, N + gamma * BTA)) + gamma * I
-            G0 = np.dot(B, solve(Z, B.T))
-            A0 = np.dot(I - gamma * G0, A) - np.dot(B, solve(Z, N))
-            H0 = gamma * np.dot(A.T, A0) - Q_tilde
+            Q_tilde = - Q + N.T @ solve(Z, N + gamma * BTA) + gamma * I
+            G0 = B @ solve(Z, B.T)
+            A0 = (I - gamma * G0) @ A - B @ solve(Z, N)
+            H0 = gamma * A.T @ A0 - Q_tilde
             f1 = np.linalg.cond(Z, np.inf)
             f2 = gamma * f1
-            f3 = np.linalg.cond(I + np.dot(G0, H0))
+            f3 = np.linalg.cond(I + G0 @ H0)
             f_gamma = max(f1, f2, f3)
             if f_gamma < current_min:
                 best_gamma = gamma
@@ -198,10 +198,10 @@ def solve_discrete_riccati(A, B, Q, R, N=None, tolerance=1e-10, max_iter=500,
     R_hat = R + gamma * BB

     # == Initial conditions == #
-    Q_tilde = - Q + np.dot(N.T, solve(R_hat, N + gamma * BTA)) + gamma * I
-    G0 = np.dot(B, solve(R_hat, B.T))
-    A0 = np.dot(I - gamma * G0, A) - np.dot(B, solve(R_hat, N))
-    H0 = gamma * np.dot(A.T, A0) - Q_tilde
+    Q_tilde = - Q + N.T @ solve(R_hat, N + gamma * BTA) + gamma * I
+    G0 = B @ solve(R_hat, B.T)
+    A0 = (I - gamma * G0) @ A - B @ solve(R_hat, N)
+    H0 = gamma * A.T @ A0 - Q_tilde
     i = 1

     # == Main loop == #
@@ -211,9 +211,9 @@ def solve_discrete_riccati(A, B, Q, R, N=None, tolerance=1e-10, max_iter=500,
             raise ValueError(fail_msg.format(i))

         else:
-            A1 = np.dot(A0, solve(I + np.dot(G0, H0), A0))
-            G1 = G0 + np.dot(np.dot(A0, G0), solve(I + np.dot(H0, G0), A0.T))
-            H1 = H0 + np.dot(A0.T, solve(I + np.dot(H0, G0), np.dot(H0, A0)))
+            A1 = A0 @ solve(I + G0 @ H0, A0)
+            G1 = G0 + A0 @ G0 @ solve(I + H0 @ G0, A0.T)
+            H1 = H0 + A0.T @ solve(I + H0 @ G0, H0 @ A0)

             error = np.max(np.abs(H1 - H0))
             A0 = A1
diff --git a/quantecon/_quadsums.py b/quantecon/_quadsums.py
index d58fa739f..d3d52c3ef 100644
--- a/quantecon/_quadsums.py
+++ b/quantecon/_quadsums.py
@@ -54,9 +54,9 @@ def var_quadratic_sum(A, C, H, beta, x0):
     x0 = np.atleast_1d(x0)
     # == Start computations == #
     Q = scipy.linalg.solve_discrete_lyapunov(np.sqrt(beta) * A.T, H)
-    cq = np.dot(np.dot(C.T, Q), C)
+    cq = C.T @ Q @ C
     v = np.trace(cq) * beta / (1 - beta)
-    q0 = np.dot(np.dot(x0.T, Q), x0) + v
+    q0 = x0.T @ Q @ x0 + v

     return q0
diff --git a/quantecon/_rank_nullspace.py b/quantecon/_rank_nullspace.py
index 6eb553c41..21d64fa12 100644
--- a/quantecon/_rank_nullspace.py
+++ b/quantecon/_rank_nullspace.py
@@ -74,7 +74,7 @@ def nullspace(A, atol=1e-13, rtol=0):
         If `A` is an array with shape (m, k), then `ns` will be an
         array with shape (k, n), where n is the estimated dimension of
         the nullspace of `A`.  The columns of `ns` are a basis for the
-        nullspace; each element in numpy.dot(A, ns) will be
+        nullspace; each element in A @ ns will be
         approximately zero.

    Note: If both `atol` and `rtol` are positive, the combined tolerance
diff --git a/quantecon/_robustlq.py b/quantecon/_robustlq.py
index 86dd91fb5..75964084c 100644
--- a/quantecon/_robustlq.py
+++ b/quantecon/_robustlq.py
@@ -107,10 +107,10 @@ def d_operator(self, P):
         """
         C, theta = self.C, self.theta
         I = np.identity(self.j)
-        S1 = np.dot(P, C)
-        S2 = np.dot(C.T, S1)
+        S1 = P @ C
+        S2 = C.T @ S1

-        dP = P + np.dot(S1, solve(theta * I - S2, S1.T))
+        dP = P + S1 @ solve(theta * I - S2, S1.T)

         return dP
@@ -142,12 +142,12 @@ def b_operator(self, P):
         """
         A, B, Q, R, beta = self.A, self.B, self.Q, self.R, self.beta
-        S1 = Q + beta * np.dot(B.T, np.dot(P, B))
-        S2 = beta * np.dot(B.T, np.dot(P, A))
-        S3 = beta * np.dot(A.T, np.dot(P, A))
+        S1 = Q + beta * B.T @ P @ B
+        S2 = beta * B.T @ P @ A
+        S3 = beta * A.T @ P @ A
         F = solve(S1, S2) if not self.pure_forecasting else np.zeros(
             (self.k, self.n))
-        new_P = R - np.dot(S2.T, F) + S3
+        new_P = R - S2.T @ F + S3

         return F, new_P
@@ -253,9 +253,9 @@ def robust_rule_simple(self, P_init=None, max_iter=80, tol=1e-8):
             iterate += 1
         P = new_P
         I = np.identity(self.j)
-        S1 = P.dot(C)
-        S2 = C.T.dot(S1)
-        K = inv(theta * I - S2).dot(S1.T).dot(A - B.dot(F))
+        S1 = P @ C
+        S2 = C.T @ S1
+        K = inv(theta * I - S2) @ S1.T @ (A - B @ F)

         return F, K, P
@@ -280,8 +280,8 @@ def F_to_K(self, F, method='doubling'):

         """
         Q2 = self.beta * self.theta
-        R2 = - self.R - np.dot(F.T, np.dot(self.Q, F))
-        A2 = self.A - np.dot(self.B, F)
+        R2 = - self.R - F.T @ self.Q @ F
+        A2 = self.A - self.B @ F
         B2 = self.C
         lq = LQ(Q2, R2, A2, B2, beta=self.beta)
         neg_P, neg_K, d = lq.stationary_values(method=method)
@@ -308,10 +308,10 @@ def K_to_F(self, K, method='doubling'):
             The value function for a given K

         """
-        A1 = self.A + np.dot(self.C, K)
+        A1 = self.A + self.C @ K
         B1 = self.B
         Q1 = self.Q
-        R1 = self.R - self.beta * self.theta * np.dot(K.T, K)
+        R1 = self.R - self.beta * self.theta * K.T @ K
         lq = LQ(Q1, R1, A1, B1, beta=self.beta)
         P, F, d = lq.stationary_values(method=method)
@@ -348,9 +348,9 @@ def compute_deterministic_entropy(self, F, K, x0):
             The deterministic entropy

         """
-        H0 = np.dot(K.T, K)
+        H0 = K.T @ K
         C0 = np.zeros((self.n, 1))
-        A0 = self.A - np.dot(self.B, F) + np.dot(self.C, K)
+        A0 = self.A - self.B @ F + self.C @ K
         e = var_quadratic_sum(A0, C0, H0, self.beta, x0)

         return e
@@ -387,14 +387,14 @@ def evaluate_F(self, F):
         # == Solve for policies and costs using agent 2's problem == #
         K_F, P_F = self.F_to_K(F)
         I = np.identity(self.j)
-        H = inv(I - C.T.dot(P_F.dot(C)) / theta)
+        H = inv(I - C.T @ P_F @ C / theta)
         d_F = np.log(det(H))

         # == Compute O_F and o_F == #
-        AO = np.sqrt(beta) * (A - np.dot(B, F) + np.dot(C, K_F))
-        O_F = solve_discrete_lyapunov(AO.T, beta * np.dot(K_F.T, K_F))
+        AO = np.sqrt(beta) * (A - B @ F + C @ K_F)
+        O_F = solve_discrete_lyapunov(AO.T, beta * K_F.T @ K_F)
         ho = (np.trace(H - 1) - d_F) / 2.0
-        tr = np.trace(np.dot(O_F, C.dot(H.dot(C.T))))
+        tr = np.trace(O_F @ C @ H @ C.T)
         o_F = (ho + beta * tr) / (1 - beta)

         return K_F, P_F, d_F, O_F, o_F
diff --git a/quantecon/game_theory/localint.py b/quantecon/game_theory/localint.py
index ad7ac14b9..bbd9b884a 100644
--- a/quantecon/game_theory/localint.py
+++ b/quantecon/game_theory/localint.py
@@ -55,7 +55,7 @@ def _play(self, actions, player_ind, tie_breaking, tol, random_state):
             (np.ones(self.N, dtype=int), actions, np.arange(self.N+1)),
             shape=(self.N, self.num_actions))
-        opponent_act_dict = self.adj_matrix[player_ind].dot(
+        opponent_act_dict = (self.adj_matrix[player_ind] @
             actions_matrix).toarray()

         for k, i in enumerate(player_ind):
diff --git a/quantecon/game_theory/normal_form_game.py b/quantecon/game_theory/normal_form_game.py
index 179779fe9..561ffa3f0 100644
--- a/quantecon/game_theory/normal_form_game.py
+++ b/quantecon/game_theory/normal_form_game.py
@@ -258,7 +258,7 @@ def reduce_last_player(payoff_array, action):
             if isinstance(action, numbers.Integral):  # pure action
                 return payoff_array.take(action, axis=-1)
             else:  # mixed action
-                return payoff_array.dot(action)
+                return payoff_array @ action

         if self.num_opponents == 1:
             payoff_vector = \
diff --git a/quantecon/game_theory/repeated_game.py b/quantecon/game_theory/repeated_game.py
index 08959da0c..ac9868067 100644
--- a/quantecon/game_theory/repeated_game.py
+++ b/quantecon/game_theory/repeated_game.py
@@ -286,7 +286,7 @@ def _R(delta, nums_actions, payoff_arrays, best_dev_gains, points,
             if (action_profile_payoff >= IC).all():
                 # check if payoff is inside the convex hull
                 extended_payoff[:2] = action_profile_payoff
-                if (np.dot(equations, extended_payoff) <= tol).all():
+                if (equations @ extended_payoff <= tol).all():
                     W_new[n_new_pt] = action_profile_payoff
                     n_new_pt += 1
                     continue
@@ -359,7 +359,7 @@ def _find_C(C, points, vertices, equations, extended_payoff, IC, tol):

     # check the case that IC is an interior point of the convex hull
     extended_payoff[:2] = IC
-    if (np.dot(equations, extended_payoff) <= tol).all():
+    if (equations @ extended_payoff <= tol).all():
         C[n, :] = IC
         n += 1
diff --git a/quantecon/markov/ddp.py b/quantecon/markov/ddp.py
index 0b76fc52d..cb710c726 100644
--- a/quantecon/markov/ddp.py
+++ b/quantecon/markov/ddp.py
@@ -590,7 +590,7 @@ def bellman_operator(self, v, Tv=None, sigma=None):
             Updated value function vector, of length n.

         """
-        vals = self.R + self.beta * self.Q.dot(v)  # Shape: (L,) or (n, m)
+        vals = self.R + self.beta * self.Q @ v  # Shape: (L,) or (n, m)
         if Tv is None:
             Tv = np.empty(self.num_states)

@@ -613,7 +613,7 @@ def T_sigma(self, sigma):

         """
         R_sigma, Q_sigma = self.RQ_sigma(sigma)
-        return lambda v: R_sigma + self.beta * Q_sigma.dot(v)
+        return lambda v: R_sigma + self.beta * Q_sigma @ v

     def compute_greedy(self, v, sigma=None):
         """
diff --git a/quantecon/markov/gth_solve.py b/quantecon/markov/gth_solve.py
index b3257c7bf..4bc99f2f7 100644
--- a/quantecon/markov/gth_solve.py
+++ b/quantecon/markov/gth_solve.py
@@ -78,12 +78,12 @@ def gth_solve(A, overwrite=False, use_jit=True):
                 break
             A1[k+1:n, k] /= scale

-        A1[k+1:n, k+1:n] += np.dot(A1[k+1:n, k:k+1], A1[k:k+1, k+1:n])
+        A1[k+1:n, k+1:n] += A1[k+1:n, k:k+1] @ A1[k:k+1, k+1:n]

     # === Backward substitution === #
     x[n-1] = 1
     for k in range(n-2, -1, -1):
-        x[k] = np.dot(x[k+1:n], A1[k+1:n, k])
+        x[k] = x[k+1:n] @ A1[k+1:n, k]

     # === Normalization === #
     x /= np.sum(x)
diff --git a/quantecon/markov/tests/test_core.py b/quantecon/markov/tests/test_core.py
index bc765b370..44b0765cd 100644
--- a/quantecon/markov/tests/test_core.py
+++ b/quantecon/markov/tests/test_core.py
@@ -225,12 +225,12 @@ def test_left_eigen_vec(self):
         stationary_distributions = self.stationary

         if self.n_stat_dists == 1:
-            assert_allclose(np.dot(stationary_distributions, mc.P),
+            assert_allclose(stationary_distributions @ mc.P,
                             stationary_distributions, atol=self.TOL)
         else:
             for i in range(self.n_stat_dists):
                 curr_v = stationary_distributions[i, :]
-                assert_allclose(np.dot(curr_v, mc.P), curr_v, atol=self.TOL)
+                assert_allclose(curr_v @ mc.P, curr_v, atol=self.TOL)


 def test_simulate_shape():
diff --git a/quantecon/markov/tests/test_gth_solve.py b/quantecon/markov/tests/test_gth_solve.py
index f5275e869..a13560bdb 100644
--- a/quantecon/markov/tests/test_gth_solve.py
+++ b/quantecon/markov/tests/test_gth_solve.py
@@ -108,7 +108,7 @@ def __call__(self, x):

 class StationaryDistLeftEigenVec(AddDescription):
     def __call__(self, A, x):
-        assert_allclose(np.dot(x, A), x, atol=TOL)
+        assert_allclose(x @ A, x, atol=TOL)


 class StationaryDistEqualToKnown(AddDescription):
diff --git a/quantecon/quad.py b/quantecon/quad.py
index cf8860d47..6b3f5d397 100644
--- a/quantecon/quad.py
+++ b/quantecon/quad.py
@@ -295,8 +295,8 @@ def qnwnorm(n, mu=None, sig2=None, usesqrtm=False):
         new_sig2 = la.cholesky(sig2)

     if d > 1:
-        nodes = nodes.dot(new_sig2) + mu  # Broadcast ok
-    else:  # nodes.dot(sig) will not be aligned in scalar case.
+        nodes = nodes @ new_sig2 + mu  # Broadcast ok
+    else:  # nodes @ sig will not be aligned in scalar case.
         nodes = nodes * new_sig2 + mu

     return nodes.squeeze(), weights
@@ -547,7 +547,7 @@ def quadrect(f, n, a, b, kind='lege', random_state=None, *args, **kwargs):
     else:
         nodes, weights = qnwequi(n, a, b, kind, random_state=random_state)

-    out = weights.dot(f(nodes, *args, **kwargs))
+    out = weights @ f(nodes, *args, **kwargs)

     return out
diff --git a/quantecon/tests/test_kalman.py b/quantecon/tests/test_kalman.py
index 07e468a33..b3ea05aba 100644
--- a/quantecon/tests/test_kalman.py
+++ b/quantecon/tests/test_kalman.py
@@ -17,8 +17,8 @@ def setup_method(self):
         self.G = np.eye(2) * .5
         self.H = np.eye(2) * np.sqrt(0.2)

-        self.Q = np.dot(self.C, self.C.T)
-        self.R = np.dot(self.H, self.H.T)
+        self.Q = self.C @ self.C.T
+        self.R = self.H @ self.H.T

         ss = LinearStateSpace(self.A, self.C, self.G, self.H)

@@ -38,13 +38,13 @@ def test_stationarity(self):

         for method in self.methods:
             sig_inf, kal_gain = kf.stationary_values(method=method)
-            mat_inv = np.linalg.inv(G.dot(sig_inf).dot(G.T) + R)
+            mat_inv = np.linalg.inv(G @ sig_inf @ G.T + R)

             # Compute the kalmain gain and sigma infinity according to the
             # recursive equations and compare
-            kal_recursion = np.dot(A, sig_inf).dot(G.T).dot(mat_inv)
-            sig_recursion = (A.dot(sig_inf).dot(A.T) -
-                             kal_recursion.dot(G).dot(sig_inf).dot(A.T) + Q)
+            kal_recursion = A @ sig_inf @ G.T @ mat_inv
+            sig_recursion = (A @ sig_inf @ A.T -
+                             kal_recursion @ G @ sig_inf @ A.T + Q)

             assert_allclose(kal_gain, kal_recursion, rtol=1e-4, atol=1e-2)
             assert_allclose(sig_inf, sig_recursion, rtol=1e-4, atol=1e-2)
@@ -75,12 +75,12 @@ def test_update_nonstationary(self):
         kf.set_state(curr_x, curr_sigma)
         kf.update(y_observed)

-        mat_inv = np.linalg.inv(G.dot(curr_sigma).dot(G.T) + R)
-        curr_k = np.dot(A, curr_sigma).dot(G.T).dot(mat_inv)
-        new_sigma = (A.dot(curr_sigma).dot(A.T) -
-                     curr_k.dot(G).dot(curr_sigma).dot(A.T) + Q)
+        mat_inv = np.linalg.inv(G @ curr_sigma @ G.T + R)
+        curr_k = A @ curr_sigma @ G.T @ mat_inv
+        new_sigma = (A @ curr_sigma @ A.T -
+                     curr_k @ G @ curr_sigma @ A.T + Q)

-        new_xhat = A.dot(curr_x) + curr_k.dot(y_observed - G.dot(curr_x))
+        new_xhat = A @ curr_x + curr_k @ (y_observed - G @ curr_x)

         assert_allclose(kf.Sigma, new_sigma, rtol=1e-4, atol=1e-2)
         assert_allclose(kf.x_hat, new_xhat, rtol=1e-4, atol=1e-2)
diff --git a/quantecon/tests/test_lqcontrol.py b/quantecon/tests/test_lqcontrol.py
index bf9c3dc29..fa4b0efc3 100644
--- a/quantecon/tests/test_lqcontrol.py
+++ b/quantecon/tests/test_lqcontrol.py
@@ -4,7 +4,6 @@
 """
 import numpy as np
 from numpy.testing import assert_allclose, assert_raises
-from numpy import dot
 from quantecon import LQ, LQMarkov


@@ -49,7 +48,7 @@ def test_scalar_sequences(self):
             (2*lq_scalar.Q+lq_scalar.beta*lq_scalar.Rf*2*lq_scalar.B**2) \
             * x0
         x_1 = lq_scalar.A * x0 + lq_scalar.B * u_0 + \
-            dot(lq_scalar.C, w_seq[0, -1])
+            lq_scalar.C * w_seq[0, -1]

         assert_allclose(u_0, u_seq, rtol=1e-4)
         assert_allclose(x_1, x_seq[0, -1], rtol=1e-4)
@@ -83,7 +82,7 @@ def test_stationary_mat(self):

         for method in self.methods:
             P, F, d = lq_mat.stationary_values(method=method)
-            val_func_lq = np.dot(x0, P).dot(x0)
+            val_func_lq = x0 @ P @ x0

             assert_allclose(f_answer, F, atol=1e-3)
             assert_allclose(val_func_lq, val_func_answer, atol=1e-3)
diff --git a/quantecon/tests/test_lqnash.py b/quantecon/tests/test_lqnash.py
index 08cc0b778..655b2081b 100644
--- a/quantecon/tests/test_lqnash.py
+++ b/quantecon/tests/test_lqnash.py
@@ -86,11 +86,11 @@ def test_nnash(self):
         f1, f2, p1, p2 = nnash(a, b1, b2, r1, r2, q1, q2, s1, s2, w1, w2, m1,
                                m2)

-        aaa = a - b1.dot(f1) - b2.dot(f2)
+        aaa = a - b1 @ f1 - b2 @ f2
         aa = aaa[:2, :2]
         tf = np.eye(2)-aa
         tfi = np.linalg.inv(tf)
-        xbar = tfi.dot(aaa[:2, 2])
+        xbar = tfi @ aaa[:2, 2]

         # Define answers from matlab.  TODO: this is ghetto
         f1_ml = np.array([
diff --git a/quantecon/tests/test_matrix_eqn.py b/quantecon/tests/test_matrix_eqn.py
index 1624e27fc..6475c3cf6 100644
--- a/quantecon/tests/test_matrix_eqn.py
+++ b/quantecon/tests/test_matrix_eqn.py
@@ -34,5 +34,5 @@ def test_solve_discrete_lyapunov_complex():

     X = qme.solve_discrete_lyapunov(A, B)

-    assert_allclose(np.dot(np.dot(A, X), A.conj().transpose()) - X, -B,
+    assert_allclose(A @ X @ A.conj().transpose() - X, -B,
                     atol=1e-15)
diff --git a/quantecon/tests/test_ricatti.py b/quantecon/tests/test_ricatti.py
index fff7139fc..d5bd40efd 100644
--- a/quantecon/tests/test_ricatti.py
+++ b/quantecon/tests/test_ricatti.py
@@ -62,7 +62,7 @@ def dare_tjm_3(method):
     B = I
     R = [[1, r], [r, r*r]]
-    Q = I - np.dot(A.T, A) + np.dot(A.T, np.linalg.solve(R + I, A))
+    Q = I - A.T @ A + A.T @ np.linalg.solve(R + I, A)
     X = solve_discrete_riccati(A, B, Q, R, method=method)
     Y = np.identity(2)
     assert_allclose(X, Y, atol=1e-07)