Fading Coder

One Final Commit for the Last Sprint

Home > Tech > Content

Constructing and Training a Neural Network with TensorFlow: Essential Snippets

Tech May 9 3
import tensorflow as tf
import logging

log = logging.getLogger(__name__)


class NetworkBase:
    """Fundamental building blocks for constructing neural networks.

    All helpers are thin wrappers around the TensorFlow 1.x graph-mode API
    (``tf.Variable`` / ``tf.get_variable`` / ``tf.nn.*``); they build graph
    ops rather than executing eagerly.
    """

    @staticmethod
    def init_weights(shape, stddev=0.1, var_name=None):
        """Create a weight variable initialized from a truncated normal.

        Args:
            shape: list/tuple of ints giving the variable shape.
            stddev: standard deviation of the truncated-normal initializer.
            var_name: when given, the variable is created with
                ``tf.get_variable`` so it participates in variable-scope
                reuse; otherwise a plain ``tf.Variable`` is returned.
        """
        init_val = tf.truncated_normal(shape=shape, mean=0.0, stddev=stddev)
        if var_name is None:
            return tf.Variable(init_val)
        return tf.get_variable(name=var_name, initializer=init_val)

    @staticmethod
    def init_biases(shape, var_name=None):
        """Create a bias variable initialized to the constant 0.1.

        A small positive constant (rather than zero) is a common choice to
        keep ReLU units active at the start of training.
        """
        init_val = tf.constant(0.1, shape=shape)
        if var_name is None:
            return tf.Variable(init_val)
        return tf.get_variable(name=var_name, initializer=init_val)

    @staticmethod
    def apply_conv2d(input_tensor, kernel, stride=1, padding='SAME'):
        """2-D convolution with an equal stride on both spatial axes.

        Args:
            input_tensor: NHWC input tensor.
            kernel: filter tensor of shape [h, w, in_channels, out_channels].
            stride: spatial stride applied to both height and width.
            padding: 'SAME' or 'VALID'.
        """
        conv = tf.nn.conv2d(
            input_tensor,
            kernel,
            strides=[1, stride, stride, 1],
            padding=padding
        )
        log.debug('conv2d output shape: %s', conv.get_shape().as_list())
        return conv

    @staticmethod
    def apply_deconv2d(input_tensor, kernel, bias, output_dims=None, stride=2):
        """Transposed convolution ("deconvolution") followed by a bias add.

        Args:
            input_tensor: NHWC input tensor with a statically known shape
                (required when ``output_dims`` is inferred).
            kernel: filter of shape [h, w, out_channels, in_channels]
                (note: conv2d_transpose swaps the channel axes relative
                to conv2d).
            bias: bias vector with ``out_channels`` elements.
            output_dims: explicit output shape; when None it is inferred by
                scaling the input's spatial dims by ``stride`` and taking
                the channel count from the kernel.
            stride: spatial upsampling factor.
        """
        if output_dims is None:
            shape_vals = input_tensor.get_shape().as_list()
            # Scale spatial dims by the stride. (Bug fix: this was
            # hard-coded to *2, which produced an invalid output shape
            # whenever stride != 2. Default stride=2 is unchanged.)
            shape_vals[1] *= stride
            shape_vals[2] *= stride
            # Output channels come from axis 2 of a transpose-conv kernel.
            shape_vals[3] = kernel.get_shape().as_list()[2]
            output_dims = shape_vals
        deconv = tf.nn.conv2d_transpose(
            input_tensor,
            kernel,
            output_dims,
            strides=[1, stride, stride, 1],
            padding='SAME'
        )
        return tf.nn.bias_add(deconv, bias)

    @staticmethod
    def apply_maxpool(input_tensor, kernel_size=2, stride=2, padding='SAME'):
        """Max pooling over square windows with equal spatial stride."""
        return tf.nn.max_pool(
            input_tensor,
            ksize=[1, kernel_size, kernel_size, 1],
            strides=[1, stride, stride, 1],
            padding=padding
        )

    @staticmethod
    def apply_avgpool(input_tensor, kernel_size, stride, padding='SAME'):
        """Average pooling over square windows with equal spatial stride."""
        return tf.nn.avg_pool(
            input_tensor,
            ksize=[1, kernel_size, kernel_size, 1],
            strides=[1, stride, stride, 1],
            padding=padding
        )

    @staticmethod
    def apply_relu(activation, name=None):
        """Standard ReLU: max(x, 0)."""
        return tf.nn.relu(activation, name=name)

    @staticmethod
    def apply_leaky_relu(activation, alpha=0.2, name=None):
        """Leaky ReLU: x for x > 0, alpha*x otherwise (for 0 < alpha < 1)."""
        return tf.maximum(alpha * activation, activation, name=name)

    @staticmethod
    def apply_relu6(activation):
        """ReLU clipped at 6: min(max(x, 0), 6)."""
        return tf.nn.relu6(activation)

    @staticmethod
    def apply_batch_norm(x, out_channels, is_training, scope='batch_norm', decay=0.9, eps=1e-5):
        """Batch normalization with an exponential moving average of stats.

        During training (``is_training`` is a boolean tensor evaluating to
        True) the batch mean/variance are used and the moving averages are
        updated as a side effect; at inference the stored moving averages
        are used instead.

        Args:
            x: NHWC input tensor; moments are taken over axes [0, 1, 2].
            out_channels: channel count of ``x`` (size of beta/gamma).
            is_training: scalar boolean tensor selecting the tf.cond branch.
            scope: variable scope name for beta/gamma.
            decay: EMA decay rate for the running statistics.
            eps: numerical-stability epsilon added to the variance.
        """
        with tf.variable_scope(scope):
            beta = tf.get_variable(
                'beta',
                shape=[out_channels],
                initializer=tf.constant_initializer(0.05)
            )
            gamma = tf.get_variable(
                'gamma',
                shape=[out_channels],
                initializer=tf.random_normal_initializer(1.0, 0.02)
            )
            moving_avg = tf.train.ExponentialMovingAverage(decay=decay)
            batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')

            def update_stats():
                # Apply the EMA update, then return the *batch* statistics;
                # control_dependencies guarantees the update runs first.
                step = moving_avg.apply([batch_mean, batch_var])
                with tf.control_dependencies([step]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(
                is_training,
                update_stats,
                lambda: (moving_avg.average(batch_mean), moving_avg.average(batch_var))
            )
            normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
        return normed

    @staticmethod
    def apply_dense(input_tensor, weights, biases):
        """Fully connected layer: input @ weights + biases."""
        result = tf.add(tf.matmul(input_tensor, weights), biases)
        log.debug('dense layer output shape: %s', result.get_shape().as_list())
        return result

Additional common operations include data preprocessing and TensorBoard monitoring utilities.

import os
import numpy as np
from scipy import misc


def save_processed_image(img_array, output_dir, filename, mean_pixel=None):
    """Write an image array to ``<output_dir>/<filename>.png``.

    Args:
        img_array: image data as a NumPy array.
        output_dir: destination directory (must already exist).
        filename: base name of the output file, without extension.
        mean_pixel: optional per-channel mean that was subtracted during
            preprocessing; when given it is added back before saving.

    Bug fix: the ``filename`` argument was previously ignored — the output
    path contained a literal placeholder, so every call overwrote the same
    file. NOTE(review): ``scipy.misc.imsave`` was removed in SciPy >= 1.2;
    confirm the pinned SciPy version or migrate to ``imageio.imwrite``.
    """
    if mean_pixel is not None:
        img_array = img_array + mean_pixel
    misc.imsave(os.path.join(output_dir, f'{filename}.png'), img_array)


def subtract_mean_pixel(image, mean_pixel):
    """Center an image by removing the dataset mean pixel.

    Works element-wise for NumPy arrays and for plain numbers alike;
    the inverse of the re-centering done when saving.
    """
    centered = image - mean_pixel
    return centered


def log_regularization_summary(variable):
    """Track a weight variable for TensorBoard and regularization.

    Emits a histogram summary under the variable's op name and adds its
    L2 loss to the 'regularization_loss' graph collection. Passing None
    is a no-op.
    """
    if variable is None:
        return
    tf.summary.histogram(variable.op.name, variable)
    tf.add_to_collection('regularization_loss', tf.nn.l2_loss(variable))


def log_activation_summary(variable):
    """Emit TensorBoard summaries describing an activation tensor.

    Records a histogram of the values and a scalar sparsity measure
    (fraction of zeros). Passing None is a no-op.
    """
    if variable is None:
        return
    base = variable.op.name
    tf.summary.histogram(f'{base}/activation', variable)
    tf.summary.scalar(f'{base}/sparsity', tf.nn.zero_fraction(variable))


def log_gradient_summary(gradient, variable):
    """Record a histogram of *gradient*, named after its variable.

    A None gradient (e.g. a variable not on the loss path) is ignored.
    """
    if gradient is None:
        return
    tf.summary.histogram(f'{variable.op.name}/gradient', gradient)
Tags: tensorflow

Related Articles

Understanding Strong and Weak References in Java

Strong References Strong references are the most prevalent type of object referencing in Java. When an object has a strong reference pointing to it, the garbage collector will not reclaim its memory. F...

Comprehensive Guide to SSTI Explained with Payload Bypass Techniques

Introduction Server-Side Template Injection (SSTI) is a vulnerability in web applications where user input is improperly handled within the template engine and executed on the server. This exploit can r...

Implement Image Upload Functionality for Django Integrated TinyMCE Editor

Django’s Admin panel is highly user-friendly, and pairing it with TinyMCE, an effective rich text editor, simplifies content management significantly. Combining the two is particularly useful for bloggi...

Leave a Comment

Anonymous

◎Feel free to join the discussion and share your thoughts.