Successfully resolving ValueError: Dimension 1 in both shapes must be equal, but are 1034 and 1024. Shapes are [100,1034] and [100,1024]. for 'Assign_8' (op: 'Assign') with input shapes: [100,1034], [100,1024].

Contents

Problem

Approach

Solution

Problem

ValueError: Dimension 1 in both shapes must be equal, but are 1034 and 1024. Shapes are [100,1034] and [100,1024]. for 'Assign_8' (op: 'Assign') with input shapes: [100,1034], [100,1024].
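
For context, this is how such a mismatch typically arises. The sketch below is a hypothetical minimal reproduction, not code from this post: the layer sizes follow the shapes in the error message, and the file name is made up. (The 'Assign_8' op in the message is how TF 1.x graph mode copies loaded weights into variables; newer TensorFlow versions report the same mismatch with slightly different wording.)

```
# Minimal reproduction sketch: save weights from a Dense(1034) model, then try
# to load them into a Dense(1024) model with the same 100-dim input.
from tensorflow.keras import layers, models

old = models.Sequential([layers.Dense(1034, input_shape=(100,))])  # kernel [100, 1034]
old.save_weights('old_weights.h5')

new = models.Sequential([layers.Dense(1024, input_shape=(100,))])  # kernel [100, 1024]
new.load_weights('old_weights.h5')  # raises the shape-mismatch ValueError
```
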
Approach

The failing op, 'Assign_8', is the graph operation that copies a saved weight tensor into a variable of the current model. Dimension 1 of the two shapes must be equal, but the tensor read from the weights file has shape [100,1034] while the destination variable has shape [100,1024]. In other words, the h5 file was saved from a network whose layer sizes differ from the network now being built, so the stored weights and the graph disagree.
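
Before retraining, the diagnosis can be confirmed by listing the shapes stored in the file and comparing them against what the model expects. A small diagnostic sketch, assuming the weights are a Keras HDF5 file; the print_h5_weight_shapes helper is introduced here for illustration:

```
# List every weight array stored in the h5 file with its shape, so the
# offending [100, 1034] entry can be spotted and compared with the shapes the
# current model expects (e.g. [w.shape for w in g.weights]).
import h5py

def print_h5_weight_shapes(path):
    with h5py.File(path, 'r') as f:
        def show(name, obj):
            if isinstance(obj, h5py.Dataset):
                print(name, obj.shape)
        f.visititems(show)

print_h5_weight_shapes('models/DCGAN1123.h5')
```
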
Solution

The h5 file being read in is itself at fault: it was saved from a model whose layer sizes no longer match the current network definition. Retrain the model to produce a fresh h5 file that matches the architecture as it is now defined, then load that file:

g.load_weights('models/DCGAN1123.h5')
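
For context, a minimal sketch of the retrain-and-reload cycle. Only the g.load_weights call above is from the original post; build_generator and its layer sizes are illustrative assumptions:

```
from tensorflow.keras import layers, models

# Illustrative generator: its first Dense kernel has shape
# [latent_dim, hidden_units], i.e. the [100, 1024] tensor in the error message.
def build_generator(latent_dim=100, hidden_units=1024):
    return models.Sequential([
        layers.Dense(hidden_units, activation='relu', input_shape=(latent_dim,)),
        layers.Dense(784, activation='tanh'),
    ])

g = build_generator()
# ... retrain the GAN here, then save weights that match this architecture ...
g.save_weights('models/DCGAN1123.h5')

g.load_weights('models/DCGAN1123.h5')  # succeeds: saved and graph shapes agree
```

The essential point is that saving and loading must use exactly the same architecture; any change to a layer's size invalidates previously saved weight files.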