Without further ado, let's go straight to the code!

import tensorflow as tf
import numpy as np
input = tf.constant(1, shape=(64,10,1), dtype=tf.float32, name='input') # shape=(batch, in_width, in_channels)
w = tf.constant(3, shape=(3,1,32), dtype=tf.float32, name='w') # shape=(filter_width, in_channels, out_channels)
conv1 = tf.nn.conv1d(input, w, 2, 'VALID') # stride of 2
print(conv1.shape) # VALID width: ceil((width - kernel_size + 1) / stride) = ceil((10-3+1)/2) = 4 -> (64, 4, 32)
conv2 = tf.nn.conv1d(input, w, 2, 'SAME') # stride of 2
print(conv2.shape) # SAME width: ceil(width / stride) = ceil(10/2) = 5 -> (64, 5, 32)
conv3 = tf.nn.conv1d(input, w, 1, 'SAME') # stride of 1
print(conv3.shape) # (64, 10, 32)
with tf.Session() as sess:
 print(sess.run(conv1))
 print(sess.run(conv2))
 print(sess.run(conv3))
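As a quick sanity check on the width formulas in the comments, here is a small helper (the function name out_width is mine, not a TensorFlow API) that computes the output length for both padding modes:

import math

def out_width(width, kernel_size, stride, padding):
 # Output length of tf.nn.conv1d for the two padding modes.
 if padding == 'VALID':
  return math.ceil((width - kernel_size + 1) / stride)
 return math.ceil(width / stride) # 'SAME'

print(out_width(10, 3, 2, 'VALID')) # 4
print(out_width(10, 3, 2, 'SAME')) # 5
print(out_width(10, 3, 1, 'SAME')) # 10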

[Figure: printed output of the conv1/conv2/conv3 example above]

Below is the shape of conv1 when input_shape=(1,10,1) and w=(3,1,1):

[Figure: conv1.shape = (1, 4, 1)]

Below is the shape of conv1 when input_shape=(1,10,1) and w=(3,1,3):

[Figure: conv1.shape = (1, 4, 3)]
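Those two shapes are easy to reproduce; a minimal sketch using the same stride-2 VALID convolution as conv1 above:

import tensorflow as tf

x = tf.ones((1, 10, 1))
print(tf.nn.conv1d(x, tf.ones((3, 1, 1)), 2, 'VALID').shape) # (1, 4, 1)
print(tf.nn.conv1d(x, tf.ones((3, 1, 3)), 2, 'VALID').shape) # (1, 4, 3)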

Supplementary notes: an example of using TensorFlow's 1-D convolution (conv1d) on language sequences

tf.nn.conv1d:

Function signature: tf.nn.conv1d(value, filters, stride, padding, use_cudnn_on_gpu=None, data_format=None, name=None)

Example:

import tensorflow as tf
import numpy as np
sess = tf.InteractiveSession()
 
# --------------- tf.nn.conv1d -------------------
inputs = tf.ones((64,10,3)) # [batch, n_sqs, embedsize]
w = tf.constant(1, tf.float32, (5,3,32)) # [kernel_size, embedsize, n_filters]
conv1 = tf.nn.conv1d(inputs, w, stride=2, padding='SAME') # conv1 shape = [batch, ceil(n_sqs/stride), n_filters]; stride is the step size
 
tf.global_variables_initializer().run()
out = sess.run(conv1)
print(out)

Note: with padding='SAME', tf.nn.conv1d pads zeros as evenly as possible on both sides of the sequence; when the total padding is odd, the extra zero goes at the end of the input.
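The exact split can be worked out by hand; a small sketch of the rule TensorFlow applies (the helper name same_padding is mine):

import math

def same_padding(width, kernel_size, stride):
 # Total zeros added by padding='SAME', split left/right (the extra one goes right).
 out = math.ceil(width / stride)
 total = max((out - 1) * stride + kernel_size - width, 0)
 left = total // 2
 return left, total - left

print(same_padding(10, 5, 2)) # (1, 2): one zero in front, two at the end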

tf.layers.conv1d:

Function signature: tf.layers.conv1d(inputs, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, ...)

Example:

import tensorflow as tf
import numpy as np
sess = tf.InteractiveSession()
 
# --------------- tf.layers.conv1d -------------------
inputs = tf.ones((64,10,3)) # [batch, n_sqs, embedsize]
num_filters = 32
kernel_size = 5
conv2 = tf.layers.conv1d(inputs, num_filters, kernel_size, strides=2, padding='valid', name='conv2') # shape = (batch, (n_sqs - kernel_size)//strides + 1, num_filters) = (64, 3, 32)
tf.global_variables_initializer().run() # initializes the kernel and bias variables created by the layer
out = sess.run(conv2)
print(out)
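The two APIs compute the same convolution; here is a hedged sketch (the layer name 'conv_check' is mine) showing that tf.layers.conv1d with a constant all-ones kernel and no bias matches tf.nn.conv1d with the same all-ones filter:

import numpy as np
import tensorflow as tf

sess = tf.InteractiveSession()
inputs = tf.ones((64, 10, 3))

# tf.layers.conv1d with a fixed all-ones kernel and no bias ...
out_layers = tf.layers.conv1d(inputs, filters=32, kernel_size=5, strides=2,
                              padding='valid', use_bias=False,
                              kernel_initializer=tf.constant_initializer(1.0),
                              name='conv_check')
# ... should match tf.nn.conv1d with the same all-ones filter.
out_nn = tf.nn.conv1d(inputs, tf.ones((5, 3, 32)), stride=2, padding='VALID')

tf.global_variables_initializer().run()
print(np.allclose(sess.run(out_layers), sess.run(out_nn))) # True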

Implementing 1-D convolution with a 2-D convolution:

import tensorflow as tf
sess = tf.InteractiveSession()
def conv2d(x, W):
 return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
def avg_pool_1x2(x):
 # 1x2 average pooling, as the printed results below confirm
 return tf.nn.avg_pool(x, ksize=[1,1,2,1], strides=[1,1,2,1], padding='SAME')
'''
ksize = [batch, pool_height, pool_width, channels]
strides = [batch, stride_height, stride_width, channels]
'''
 
x = tf.Variable([[1,2,3,4]], dtype=tf.float32)
x = tf.reshape(x, [1,1,4,1]) # this reshape is essential; otherwise the dimensions won't match
'''
[batch, in_height, in_width, in_channels] = [1,1,4,1]
'''
 
W_conv1 = tf.Variable([1,2,1], dtype=tf.float32) # filter weights
W_conv1 = tf.reshape(W_conv1, [1,3,1,1]) # this reshape is likewise essential
'''
[filter_height, filter_width, in_channels, out_channels]
'''
h_conv1 = conv2d(x, W_conv1) # result: [4, 8, 12, 11]
h_pool1 = avg_pool_1x2(h_conv1)
tf.global_variables_initializer().run()
print(sess.run(h_conv1)) # [4, 8, 12, 11]
print(sess.run(h_pool1)) # [6, 11.5]
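Those numbers can be double-checked without TensorFlow: np.convolve with the symmetric kernel [1, 2, 1] (kernel flipping does not matter here) reproduces both the convolution and the pooled averages:

import numpy as np

x = np.array([1., 2., 3., 4.])
conv = np.convolve(x, [1, 2, 1], mode='same') # [4, 8, 12, 11]
pooled = conv.reshape(-1, 2).mean(axis=1)     # [6, 11.5]
print(conv, pooled)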

Two pooling operations:

# 1: stride max pooling
convs = tf.expand_dims(conv, axis=-1) # shape = [batch, seq_len, num_filters, 1]
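# The rest of this first example was lost to page extraction; a plausible
# completion (the window/stride of 3 is my assumption, not the original's):
smp = tf.nn.max_pool(convs, ksize=[1,3,1,1], strides=[1,3,1,1], padding='SAME')
smp = tf.squeeze(smp, axis=-1) # back to [batch, ceil(seq_len/3), num_filters]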
# 2: global max pooling
kernel_sizes = [3,4,5] # convolution kernels with window sizes 3, 4 and 5
with tf.name_scope("mul_cnn"):
 pooled_outputs = []
 for kernel_size in kernel_sizes:
  # CNN layer
  conv = tf.layers.conv1d(embedding_inputs, self.config.num_filters, kernel_size, name='conv-%s' % kernel_size)
  # global max pooling layer
  gmp = tf.reduce_max(conv, reduction_indices=[1], name='gmp')
  pooled_outputs.append(gmp)
 self.h_pool = tf.concat(pooled_outputs, 1) # concatenate the pooled outputs

That's all for this walkthrough of 1-D convolution in TensorFlow; I hope it gives you a useful reference.
