function [beta,it,vobj,lambda] = gme8(y,x,z,v)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%input1:  y - dependent variable (n x 1)
%input2:  x - regressors (n x k)
%input3:  z - support points for the coefficients (k x i)
%input4:  v - support points for the errors (1 x j; the support for one
%             observation is enough, it is replicated for all observations)
%output1: beta   - estimated coefficients
%output2: it     - number of iterations
%output3: vobj   - value of the objective function
%output4: lambda - Lagrange multipliers
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%n - number of observations; k - number of regressors
[n,k] = size(x);
%i - dimension of the support for the coefficients
i = size(z,2);
%j - dimension of the support for the errors
j = size(v,2);
%V - full support for the errors (one row per observation)
V = repmat(v,n,1);

%initial values
len = 1;
change = 1;
lambda = zeros(n,1);
it = 0;

%Newton iterations on the Lagrange multipliers lambda
while change > 1e-6
    it = it+1;

    %evaluate the probabilities p and w of z and v at the current value of lambda
    newz = exp(-(z.*kron(transpose(x)*lambda,ones(1,i))));
    sum_newz = newz*ones(i,1);
    p = newz./repmat(sum_newz,1,i);
    w = exp(-lambda*v)./repmat(sum(transpose(exp(-lambda*v)))',1,j);

    %evaluate the gradient
    g = y - x*(z.*p*ones(i,1)) - V.*w*ones(j,1);

    %evaluate the Hessian.
    %Direct inversion of the Hessian would require inverting an n x n matrix,
    %which is very expensive in computing time. Instead, use the updating
    %formula (Greene, 4th Edition, p. 32), which requires only the inverse
    %of a k x k matrix.
    %first get the inverse of the variance matrices of z and v
    inv_z = diag((sum((p.*(z.^2))') - sum((p.*z)').^2).^(-1));
    inv_v = diag((sum((w.*(V.^2))') - sum((w.*V)').^2).^(-1));
    %get the inverse of the Hessian
    temp = inv_v*x;
    inv_H = -inv_v + temp*inv(inv_z + x'*temp)*temp';

    %evaluate the step direction
    increm = inv_H*g;
    %update lambda
    lambda = lambda + increm;

    %calculate the tolerance, using g'*inv(H)*g, which is scale free
    len0 = len;
    len = g'*increm;
    change = abs(len - len0);
end

%evaluate beta: the expected value of z under the estimated probabilities p
beta = sum((p.*z)')';

%evaluate the objective function
vobj = transpose(y)*lambda ...
     + sum(log(sum(exp(-transpose(z.*kron(transpose(x)*lambda,ones(1,size(z,2)))))))) ...
     + sum(log(sum(transpose(exp(-lambda*v)))));
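%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Example usage (a minimal sketch with simulated data; the variable names and
%the particular support points below are illustrative assumptions, not part
%of the routine):
%
%   n = 100; k = 3;
%   x = [ones(n,1) randn(n,2)];            %regressors with a constant
%   y = x*[1; 0.5; -0.2] + 0.1*randn(n,1); %simulated dependent variable
%   zsup = repmat([-10 -5 0 5 10],k,1);    %k x i support for the coefficients
%   vsup = [-1 0 1];                       %1 x j support for the errors
%   [beta,it,vobj,lambda] = gme8(y,x,zsup,vsup);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%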