#!/usr/bin/python
# -*- coding: utf-8 -*-
import mxnet as mx
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
import os
import time
from dataiter import getTrainIter, getTestIter
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects

monitor = None
gpus = 0
disp_batches = 100
batch_size = 2100  # 400 200 200 420 245 205 167 263
num_batches = None
load_epoch = 383  # 382

# load data
# train_rec = '##.rec'
test_rec = '##.rec'


# train = getTrainIter(train_rec, 'age_label', 64, asGray=True, to8clage=True)
val = getTestIter(test_rec, 'age_label', batch_size, asGray=True, to8clage=True, aspaperdiv=False)
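# Note: getTrainIter/getTestIter come from the project-local dataiter module.
# Judging by the flag names alone (an inference, not documented here), asGray
# loads images as grayscale and to8clage buckets ages into 8 coarse classes.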

model_prefix = '##'
save_model_prefix = '##'
# # custom metric
# eval_metrics = mx.metric.CompositeEvalMetric()
# # eval_metrics.add(Acc_or_center())
# eval_metrics.add(Accuracy())
# eval_metrics.add(Diag_acc())
# eval_metrics.add(CenterLossMetric())

def softmax(x):
    # Stable softmax: shifting by the max keeps np.exp from overflowing.
    e = np.exp(np.asarray(x) - np.max(x, axis=0))
    return e / np.sum(e, axis=0)
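# Quick sanity check (illustrative values only):
#   softmax(np.array([1.0, 2.0, 3.0]))
#   -> array([0.09003057, 0.24472847, 0.66524096])  # sums to 1.0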
# The '''1 / '''0 markers below tag alternative experiment blocks; wrapped in
# string literals like this, each block is inert dead code kept for reference.
'''1
devs = mx.cpu() if gpus in (None, '') else mx.gpu(int(gpus))
sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, load_epoch)
internals = sym.get_internals()
print(internals.list_outputs())
mod = mx.mod.Module(context=devs, symbol=sym)
# print(val.provide_label,arg_params)
# mod = mx.mod.Module(context=devs, symbol=sym, data_names=('data',),label_names=('age_label',))
mod.bind(val.provide_data, val.provide_label, for_training=False)
# mod.set_params(arg_params, aux_params)
mod.set_params(arg_params, aux_params, allow_missing=True)
'''
'''0
print('--------------------------******************--------------------------')
internals = sym.get_internals()
print(internals.list_outputs())
# embedding_layer = internals['embedding_output']
embedding_layer = internals['quantiflatten1_output']
feature_extractor = mx.model.FeedForward(
    ctx=mx.gpu(0), symbol=embedding_layer, numpy_batch_size=1,
    arg_params=arg_params, aux_params=aux_params, allow_extra_params=True)
batch = val.next()
pred = feature_extractor.predict(batch.data[0])
print('pred.shape: ', pred.shape, '\n', 'pred: ', pred)
print('pred type:', type(pred))
# pred=np.array([np.array(pred[:,i*2:i*2+2]) for i in range(7)])
# Reshape (batch, 14) -> (batch, 7, 2); use the actual batch size instead of
# hard-coding 128.
pred = np.reshape(pred, (pred.shape[0], 7, -1))
print('pred.shape: ', pred.shape, '\n')
pred = np.argmax(pred,axis=-1)
print('pred.shape: ', pred.shape, '\n', 'pred: ', pred)
print('pred type:', type(pred))
# print(pred.shape,label.shape,pred,label)
print('--------------------------******************--------------------------')
'''

'''1
# print(mod.get_params(), len(mod.get_params()))
# print(mod.get_params()[0]['Quan_fc-7_bias'].asnumpy())
fc_bias = mod.get_params()[0]['Quan_fc-7_bias'].asnumpy()
print('fc_bias.shape:',fc_bias.shape)

# Collect the per-head (width-2) bias pairs.
list_bias = []
for i in range(7):
    ind = i * 2
    list_bias.append(fc_bias[ind:ind + 2])

arg_params_Quan_fc7_bias = arg_params['Quan_fc-7_bias'].asnumpy()
# Split the 14 bias entries into even- and odd-indexed halves (one per head).
fc_bias_0 = [value for i, value in enumerate(fc_bias) if i % 2 == 0]
fc_bias_1 = [value for i, value in enumerate(fc_bias) if i % 2 != 0]
print('--------------------------******************--------------------------')
print('arg_params type:', type(arg_params))
print('arg_params_Quan_fc7_bias:', arg_params_Quan_fc7_bias,type(arg_params_Quan_fc7_bias),arg_params_Quan_fc7_bias.shape)
arg_params_Quan_fc7_bias = mx.nd.array(arg_params_Quan_fc7_bias)
print('--------------------------******************--------------------------')
print('fc_bias:', fc_bias, len(fc_bias))
print('fc_bias_0:', fc_bias_0)
print('fc_bias_1:', fc_bias_1)
fc_bias = softmax(fc_bias)
fc_bias_0 = softmax(fc_bias_0)
fc_bias_1 = softmax(fc_bias_1)
print('--------------------------******************--------------------------')
print('fc_bias softmax:', fc_bias, len(fc_bias))
print('fc_bias_0 softmax:', fc_bias_0, len(fc_bias_0))
print('fc_bias_1 softmax:', fc_bias_1, len(fc_bias_1))
print('--------------------------******************--------------------------')
plt.figure(figsize=(9, 3))
plt.subplot(1, 3, 1)
plt.plot(range(len(fc_bias)), fc_bias, 'b', label='fc_bias distribution')
plt.subplot(1, 3, 2)
plt.plot(range(len(fc_bias_0)), fc_bias_0, 'r', label='even-index nodes')
plt.subplot(1, 3, 3)
plt.plot(range(len(fc_bias_1)), fc_bias_1, 'g', label='odd-index nodes')
plt.show()
#monitor = mx.mon.Monitor(monitor, pattern=".*") # if args.monitor > 0 else None
# mod.score(val, eval_metric=eval_metrics, num_batch=num_batches, display=True,
#     batch_end_callback=mx.callback.Speedometer(batch_size, disp_batches, auto_reset=False))
'''

def _save_model(model_prefix):
    # Create the checkpoint directory if needed, then return the epoch-end
    # checkpoint callback.
    dst_dir = os.path.dirname(model_prefix)
    if dst_dir and not os.path.isdir(dst_dir):
        os.makedirs(dst_dir)
    return mx.callback.do_checkpoint(model_prefix)
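# Usage sketch (not exercised in this script): the returned callback would be
# passed to training, e.g.
#   checkpoint = _save_model(save_model_prefix)
#   model.fit(X=train, eval_data=val, epoch_end_callback=checkpoint)
# where `train` would come from the commented-out getTrainIter call above.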

def main():
    # load model, get embedding layer
    # model = mx.model.FeedForward.load(
    #     'models_resnet_qfa_or_center_embedding_2dim_8clage_1off_fullmodel/resnet_qfa_or_center_embedding_2dim_adience-2align_8clage_1off-fullmodel-float', 500, ctx=mx.gpu(0), numpy_batch_size=1)
    # model = mx.model.FeedForward.load(
    #     'models_resnet_qfa_or_nocenter_embedding_2dim_8clage_1off_fullmodel/resnet_qfa_or_nocenter_embedding_2dim_adience-2align_8clage_1off-fullmodel-float', 500, ctx=mx.gpu(0), numpy_batch_size=1)
    # model = mx.mod.Module.load(model_prefix, load_epoch, context=mx.gpu(0))
    model = mx.model.FeedForward.load(model_prefix, load_epoch, ctx=mx.gpu(0), numpy_batch_size=1)
    
    internals = model.symbol.get_internals()
    print(internals.list_outputs())
    fc_bias = model.arg_params['Quan_fc-7_bias'].asnumpy()
    arg_params_Quan_fc7_bias = fc_bias.copy()
    print('fc_bias.shape:', fc_bias.shape)
    print('arg_params_Quan_fc7_bias.type:', type(arg_params_Quan_fc7_bias), arg_params_Quan_fc7_bias.shape)
    # Smooth the middle bias entries from their neighbours. Note the mismatch:
    # the message below says "(ind2+ind6)/2", but the code divides by 3, i.e.
    # a damped combination rather than a true average.
    print('computing the ind4=(ind2+ind6)/2 and ind5=(ind3+ind7)/2')
    arg_params_Quan_fc7_bias[4] = (arg_params_Quan_fc7_bias[2] + arg_params_Quan_fc7_bias[6]) / 3
    arg_params_Quan_fc7_bias[5] = (arg_params_Quan_fc7_bias[3] + arg_params_Quan_fc7_bias[7]) / 3

    # Same smoothing for ind6/ind7; note these read the just-updated ind4/ind5,
    # so the updates are sequential, not simultaneous.
    arg_params_Quan_fc7_bias[6] = (arg_params_Quan_fc7_bias[4] + arg_params_Quan_fc7_bias[8]) / 3
    arg_params_Quan_fc7_bias[7] = (arg_params_Quan_fc7_bias[5] + arg_params_Quan_fc7_bias[9]) / 3
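    # Worked toy example (assumed values, purely illustrative): with
    # bias[2] = 0.3, bias[6] = 1.5 and bias[8] = 2.1, the sequential updates give
    #   bias[4] = (0.3 + 1.5) / 3 = 0.6
    #   bias[6] = (0.6 + 2.1) / 3 = 0.9   # reuses the freshly written bias[4]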

    fc_bias = arg_params_Quan_fc7_bias
    arg_params_Quan_fc7_bias = mx.nd.array(arg_params_Quan_fc7_bias)
    print('arg_params_Quan_fc7_bias.type:', type(arg_params_Quan_fc7_bias),
          arg_params_Quan_fc7_bias.shape)
    print('arg_params_Quan_fc7_bias:', arg_params_Quan_fc7_bias)
    # Write the edited bias back into the model's parameters.
    model.arg_params['Quan_fc-7_bias'] = arg_params_Quan_fc7_bias
    
    '''
    # Collect the per-head (width-2) bias pairs.
    list_bias = []
    for i in range(7):
        ind = i * 2
        list_bias.append(fc_bias[ind:ind + 2])

    fc_bias_0 = [value for i, value in enumerate(fc_bias) if i % 2 == 0]
    fc_bias_1 = [value for i, value in enumerate(fc_bias) if i % 2 != 0]
    print('--------------------------********plot_start*********--------------------------')
    print('fc_bias:', fc_bias, len(fc_bias))
    print('fc_bias_0:', fc_bias_0)
    print('fc_bias_1:', fc_bias_1)
    fc_bias = softmax(fc_bias)
    fc_bias_0 = softmax(fc_bias_0)
    fc_bias_1 = softmax(fc_bias_1)
    print('--------------------------******************--------------------------')
    print('fc_bias softmax:', fc_bias, len(fc_bias))
    print('fc_bias_0 softmax:', fc_bias_0, len(fc_bias_0))
    print('fc_bias_1 softmax:', fc_bias_1, len(fc_bias_1))
    print('--------------------------******************--------------------------')
    plt.figure(figsize=(18, 6))
    plt.subplot(1, 3, 1)
    plt.plot(range(len(fc_bias)), fc_bias, 'b', label='fc_bias')
    plt.legend(loc=0)
    plt.subplot(1, 3, 2)
    plt.plot(range(len(fc_bias_0)), fc_bias_0, 'r', label='fc_bias_0')
    plt.legend(loc=0)
    plt.subplot(1, 3, 3)
    plt.plot(range(len(fc_bias_1)), fc_bias_1, 'g', label='fc_bias_1')
    plt.legend()
    plt.show()
    print('--------------------------*********plot_end*********--------------------------')
    '''
    # Slice the network at the embedding layer so predict() returns features
    # rather than the final classifier outputs.
    #embedding_layer = internals['quantiflatten3_output']
    embedding_layer = internals['quantiflatten1_output']
    feature_extractor = mx.model.FeedForward(
        ctx=mx.gpu(0), symbol=embedding_layer, numpy_batch_size=1,
        arg_params=model.arg_params, aux_params=model.aux_params,
        allow_extra_params=True)
    print('feature_extractor loaded')

    # save model
    # checkpoint = _save_model(model_prefix)
    #mx.model.save_checkpoint(
    #    save_model_prefix, load_epoch, embedding_layer, model.arg_params, model.aux_params)

    # test_rec = '***.rec'  # 200
    #test_rec = '***.rec'
    #val = getTestIter(test_rec, 'age_label', batch_size, asGray=True, to8clage=True, aspaperdiv=False)
    
    batch = val.next()
    pred = feature_extractor.predict(batch.data[0])
    print('pred.shape: ', pred.shape, 'pred: ', pred, 'pred type:', type(pred))
    # pred = np.argmax(pred,axis=-1)
    label = batch.label[0].asnumpy()

    # Decode the 7 two-way heads: split the (batch, 14) scores into 7 binary
    # classifiers, take each head's argmax (0 or 1), then sum the decisions.
    # The count of positive decisions is the predicted class (ordinal decoding).
    pred = np.array([pred[:, i * 2:i * 2 + 2] for i in range(7)])
    print('pred.shape:', pred.shape)  # (7, batch, 2)
    pred = np.argmax(pred, axis=-1).astype('int32')
    print('pred.shape:', pred.shape)  # (7, batch)
    pred = np.sum(pred, axis=0)
    print('pred.shape:', pred.shape)  # (batch,)
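    # Minimal sanity check of the decode (toy numbers, assumed 2-sample batch):
    #   toy = np.arange(2 * 14).reshape(2, 14)
    #   heads = toy.reshape(2, 7, 2)        # same grouping as the slicing above
    #   np.argmax(heads, -1).sum(-1)        # -> array([7, 7]), values in 0..7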

    print(pred.shape,label.shape,pred,label)

    # Count how many samples land in each age class, for predictions and labels.
    from collections import Counter
    vv = sorted(Counter(pred).items(), key=lambda x: x[0])
    print(vv)
    vv = sorted(Counter(label).items(), key=lambda x: x[0])
    print(vv)

    # Exact accuracy plus "diagonal" (1-off) accuracy: a prediction also counts
    # for diag_acc when it misses the true class by exactly one.
    sub_value = np.abs(pred.ravel() - label.ravel())
    exact_num = (sub_value == 0).sum()
    sam_dia_num = exact_num + (sub_value == 1).sum()
    acc = exact_num / float(label.shape[0])
    diag_acc = sam_dia_num / float(label.shape[0])
    print('acc:', acc, 'diag_acc:', diag_acc)
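    # Toy example (made-up numbers): pred = [3, 5, 2], label = [3, 4, 0]
    # gives 1 exact match and 1 one-off miss, so acc = 1/3 and diag_acc = 2/3.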
    
if __name__ == '__main__':
    main()

