import matplotlib as mpl        # Matplotlib is Python's plotting library; it works well with NumPy
import matplotlib.pyplot as plt # pyplot: matplotlib's data-visualization interface
# %matplotlib inline  # In Jupyter Notebook / QtConsole this magic makes plot() calls (or a Figure object) render automatically and embed the figure in the console
import numpy as np              # numerical computing; stores and processes large matrices
import sklearn                  # common machine-learning toolkit wrapping regression, dimensionality reduction, classification, clustering, etc.
import pandas as pd             # data-analysis package for Python
import os   # operating-system interface: work with files and directories
import sys  # functions related to the Python interpreter and its environment
import time
import tensorflow as tf
from tensorflow import keras

##################################################################################################
# Select a GPU
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
##################################################################################################
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
"""output:
2.0.0
sys.version_info(major=3, minor=7, micro=4, releaselevel='final', serial=0)
matplotlib 3.1.1
numpy 1.16.5
pandas 0.25.1
sklearn 0.21.3
tensorflow 2.0.0
"""
##################################################################################################
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
print(housing)
"""OUTPUT:{'data': array([[ 8.3252 , 41. , 6.98412698, ..., 2.55555556, 37.88 , -122.23 ],[ 8.3014 , 21. , 6.23813708, ..., 2.10984183, 37.86 , -122.22 ],[ 7.2574 , 52. , 8.28813559, ..., 2.80225989,37.85 , -122.24 ],...,[ 1.7 , 17. , 5.20554273, ..., 2.3256351 , 39.43 , -121.22 ],[ 1.8672 , 18. , 5.32951289, ..., 2.12320917, 39.43 , -121.32 ],[ 2.3886 , 16. , 5.25471698, ..., 2.61698113, 39.37 , -121.24 ]]), 'target': array([4.526, 3.585, 3.521, ..., 0.923, 0.847, 0.894]), 'feature_names': ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude'], 'DESCR': '.. _california_housing_dataset:\n\nCalifornia Housing dataset\n--------------------------\n\n**Data Set Characteristics:**\n\n :Number of Instances: 20640\n\n :Number of Attributes: 8 numeric, predictive attributes and the target\n\n :Attribute Information:\n - MedInc median income in block\n - HouseAge median house age in block\n - AveRooms average number of rooms\n - AveBedrms average number of bedrooms\n - Population block population\n - AveOccup average house occupancy\n - Latitude house block latitude\n - Longitude house block longitude\n\n :Missing Attribute Values: None\n\nThis dataset was obtained from the StatLib repository.\nhttp://lib.stat.cmu.edu/datasets/\n\nThe target variable is the median house value for California districts.\n\nThis dataset was derived from the 1990 U.S. census, using one row per census\nblock group. A block group is the smallest geographical unit for which the U.S.\nCensus Bureau publishes sample data (a block group typically has a population\nof 600 to 3,000 people).\n\nIt can be downloaded/loaded using the\n:func:`sklearn.datasets.fetch_california_housing` function.\n\n.. topic:: References\n\n - Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,\n Statistics and Probability Letters, 33 (1997) 291-297\n'}"""
##################################################################################################
from sklearn.model_selection import train_test_split

# housing.data is split into x_train_all and x_test
# housing.target is split into y_train_all and y_test
# (train_test_split uses its default test_size of 0.25, so a quarter of the samples go to the test set)
x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state = 7)
print(x_train_all)
print(x_train_all.shape)
"""output:[[ 1.725 52. 3.38621586 ... 2.3381014337.88 -122.26 ][ 3.312 17. 6.45175767 ... 2.6155572233.72 -116.37 ][ 1.2469 33. 4.77325581 ... 3.3662790735.37 -119.01 ]...[ 4.4821 42. 4.45 ... 2.77534.24 -118.26 ][ 11.075 38. 7.20467836 ... 2.3976608233.61 -117.91 ][ 3.4333 27. 3.44308036 ... 1.6216517937.57 -122.33 ]](15480, 8) 有15480个train样本,每个样本有8个特征 """
print(y_train_all)
print(y_train_all.shape)
"""output:[2.875 1.213 0.467 ... 2.719 5.00001 2.9 ](15480,) 每个train样本对应的label """
print(x_test)
print(x_test.shape)
"""output:[[ 2.0278 31. 2.8469285 ... 3.1289023234.06 -118.31 ][ 4.3056 30. 5.03693182 ... 2.5710227333.81 -118.31 ][ 3.675 52. 4.85135135 ... 2.0231660237.81 -122.24 ]...[ 3.1083 40. 4.76567657 ... 2.9075907634.04 -118.13 ][ 2.0775 44. 3.05315615 ... 4.3887043236.9 -121.76 ][ 5.6152 38. 5.98951049 ... 2.7237762234.12 -118.09 ]](5160, 8) 有5160个test样本,每个样本有8个特征"""
print(y_test)
print(y_test.shape)
"""output:[3.6 3.36 2.699 ... 2.205 1.214 3.599](5160,) 每个test样本对应的label """#x_train_all对应x_train,x_valid
#y_train_all对应y_train,y_valid
x_train, x_valid, y_train, y_valid = train_test_split(x_train_all, y_train_all, random_state = 11)print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)
"""output:(11610, 8) (11610,) x_train(3870, 8) (3870,) x_valid(5160, 8) (5160,) x_test """##################################################################################################
#归一化数据 (形成均值为0方差为1的数据样本)
from sklearn.preprocessing import StandardScalerscaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)
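# Quick check (a sketch added for illustration, not in the original): fit_transform on the
# training set learns the per-feature mean/std and applies them, while transform on the
# validation/test sets reuses the training statistics. The scaled training data should
# therefore have roughly zero mean and unit variance per feature.
print(x_train_scaled.mean(axis=0))  # ~0 for every feature
print(x_train_scaled.std(axis=0))   # ~1 for every feature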
output_dir = "generate_csv"
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# np.c_ concatenates arrays column-wise, so the label is appended as the last column of each row
train_data = np.c_[x_train_scaled, y_train]
valid_data = np.c_[x_valid_scaled, y_valid]
test_data = np.c_[x_test_scaled, y_test]
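# A tiny illustration (added sketch, not from the original) of what np.c_ does here:
# it joins arrays along the second axis, so an (n, 8) feature matrix and an (n,) label
# vector become one (n, 9) matrix whose last column is the label.
demo_x = np.array([[1, 2], [3, 4]])
demo_y = np.array([10, 20])
print(np.c_[demo_x, demo_y])  # [[ 1  2 10]
                              #  [ 3  4 20]]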
header_str = ",".join(housing.feature_names + ["MidianHouseValue"])def save_to_csv(output_dir, #输出目录data, #存储的数据name_prefix,#表示是train还是test数据header=None,n_parts=10): #表示数据切分为10部分path_format = os.path.join(output_dir, "{}_{:02d}.csv")#声明的格式#生成切分的文件名,第一个{}里放train或test,第二个放的是两位的整数filenames = []#存储文件名for file_idx, row_indices in enumerate(np.array_split(np.arange(len(data)), n_parts)#生成和样本data一样大的一维数组,然后分成n_parts份,每一份数组存放是索引,然后可以根据索引去取数据#row_indices表示每一份数组的索引#file_idex表示索引,相当于for i从0开始递增):part_csv = path_format.format(name_prefix, file_idx)#调用格式,生成文件名filenames.append(part_csv)#打开新建的子文件with open(part_csv, "wt", encoding="utf-8") as f:if header is not None:f.write(header + "\n")for row_index in row_indices:f.write(",".join([repr(col)for col in data[row_index]]))f.write('\n')return filenamestrain_filenames = save_to_csv(output_dir, train_data, "train",header_str, n_parts=20)
valid_filenames = save_to_csv(output_dir, valid_data, "valid",
                              header_str, n_parts=10)
test_filenames = save_to_csv(output_dir, test_data, "test",
                             header_str, n_parts=10)
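# Quick verification (an added sketch, not in the original): read one of the freshly
# written part files back with pandas to confirm the header and the scaled values.
print(pd.read_csv(train_filenames[0]).head())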
""" print()和pprint()都是python的打印模块,功能基本一样, 唯一的区别就是pprint()模块打印出来的数据结构更加完整,每行为一个数据结构,更加方便阅读打印输出结果。 特别是对于特别长的数据打印, print()输出结果都在一行,不方便查看, 而pprint()采用分行打印输出,所以对于数据结构比较复杂、数据长度较长的数据,适合采用pprint()打印方式。 当然,一般情况多数采用print()。 """import pprint
print("train filenames:")
pprint.pprint(train_filenames)
print("valid filenames:")
pprint.pprint(valid_filenames)
print("test filenames:")
pprint.pprint(test_filenames)
"""output:
train filenames:
['generate_csv\\train_00.csv',
 'generate_csv\\train_01.csv',
 'generate_csv\\train_02.csv',
 'generate_csv\\train_03.csv',
 'generate_csv\\train_04.csv',
 'generate_csv\\train_05.csv',
 'generate_csv\\train_06.csv',
 'generate_csv\\train_07.csv',
 'generate_csv\\train_08.csv',
 'generate_csv\\train_09.csv',
 'generate_csv\\train_10.csv',
 'generate_csv\\train_11.csv',
 'generate_csv\\train_12.csv',
 'generate_csv\\train_13.csv',
 'generate_csv\\train_14.csv',
 'generate_csv\\train_15.csv',
 'generate_csv\\train_16.csv',
 'generate_csv\\train_17.csv',
 'generate_csv\\train_18.csv',
 'generate_csv\\train_19.csv']
valid filenames:
['generate_csv\\valid_00.csv',
 'generate_csv\\valid_01.csv',
 'generate_csv\\valid_02.csv',
 'generate_csv\\valid_03.csv',
 'generate_csv\\valid_04.csv',
 'generate_csv\\valid_05.csv',
 'generate_csv\\valid_06.csv',
 'generate_csv\\valid_07.csv',
 'generate_csv\\valid_08.csv',
 'generate_csv\\valid_09.csv']
test filenames:
['generate_csv\\test_00.csv',
 'generate_csv\\test_01.csv',
 'generate_csv\\test_02.csv',
 'generate_csv\\test_03.csv',
 'generate_csv\\test_04.csv',
 'generate_csv\\test_05.csv',
 'generate_csv\\test_06.csv',
 'generate_csv\\test_07.csv',
 'generate_csv\\test_08.csv',
 'generate_csv\\test_09.csv']
"""
##################################################################################################
"""从多个csv文件中读取dataset1.新建一个包含文件名的dataset2.从文件名中读文件(生成一个字符串包含各个特征的值以,分开)3.将csv文件(多个字符串)中的内容解析出来"""#1.
# a dataset containing the filenames
filename_dataset = tf.data.Dataset.list_files(train_filenames)
# [1.csv, 2.csv, 3.csv] --> [tf.Tensor, tf.Tensor, tf.Tensor]
for filename in filename_dataset:
    print(filename)
"""output:
tf.Tensor(b'generate_csv\\train_01.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_18.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_02.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_19.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_03.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_07.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_11.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_05.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_12.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_04.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_15.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_16.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_13.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_06.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_09.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_10.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_14.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_00.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_08.csv', shape=(), dtype=string)
tf.Tensor(b'generate_csv\\train_17.csv', shape=(), dtype=string)
"""
# Note: list_files shuffles the filenames by default, which is why they come out unordered.
# 2.
n_readers = 5
# Go through every element of filename_dataset (every csv file), read the lines of
# each file, and merge the contents of the files into one dataset.
dataset = filename_dataset.interleave(
    # mapping function: TextLineDataset reads a text file line by line into a dataset;
    # skip(1) drops the header line of each file
    lambda filename: tf.data.TextLineDataset(filename).skip(1),
    cycle_length = n_readers  # read from 5 files in parallel (interleaved)
)
print(dataset.take(1))
"""output:<DatasetV1Adapter shapes: (), types: tf.string> """for line in dataset.take(5):print(line.numpy())#由于line是tensor结构的数据,调用numpy可以更可视化"""output:b'0.6303435674178064,1.874166156711919,-0.06713214279531016,-0.12543366804152128,-0.19737553788322462,-0.022722631725889016,-0.692407235065288,0.7265233438487496,2.419'b'-0.8219588176953616,1.874166156711919,0.18212349433218608,-0.03170019246279883,-0.6011178900722581,-0.14337494105109344,1.0852205298015787,-0.8613994495208361,1.054'b'0.04971034572063198,-0.8492418886278699,-0.06214699417830008,0.17878747064657746,-0.8025354230744277,0.0005066066922077538,0.6466457006743215,-1.1060793768010604,2.286'b'0.15782311132800697,0.43236189741438374,0.3379948076652917,-0.015880306122244434,-0.3733890577139493,-0.05305245634489608,0.8006134598360177,-1.2359095422966828,3.169'b'0.401276648075221,-0.9293421252555106,-0.05333050451405854,-0.1865945262276826,0.6545661895448709,0.026434465728210874,0.9312527706398824,-1.4406417263474771,2.512' """#3.# tf.io.decode_csv(str, record_defaults) 第一个参数:由逗号分隔的csv文件(字符串),第二个参数:字符串中各个field的类型是什么
# 3.
# tf.io.decode_csv(str, record_defaults)
#   first argument:  one line of a csv file (a comma-separated string)
#   second argument: the type of each field in the string
# Example:
sample_str = '1,2,3,4,5'
# record_defaults = [tf.constant(0, dtype=tf.int32)] * 5  # five int fields
# parsed_fields = tf.io.decode_csv(sample_str, record_defaults)
record_defaults = [
    tf.constant(0, dtype=tf.int32),  # constant 0
    0,                               # a concrete value
    np.nan,                          # NaN (parsed as float32)
    "hello",                         # str
    tf.constant([])                  # constant with no fixed type, defaults to float32
]
# record_defaults must match the fields of the csv line in both type and count
parsed_fields = tf.io.decode_csv(sample_str, record_defaults)
print(parsed_fields)
"""output:
[<tf.Tensor: id=92, shape=(), dtype=int32, numpy=1>,
 <tf.Tensor: id=93, shape=(), dtype=int32, numpy=2>,
 <tf.Tensor: id=94, shape=(), dtype=float32, numpy=3.0>,
 <tf.Tensor: id=95, shape=(), dtype=string, numpy=b'4'>,
 <tf.Tensor: id=96, shape=(), dtype=float32, numpy=5.0>]
"""
##################################################################################################
def parse_csv_line(line, n_fields = 9):
    # line: one line of a csv file (a string); n_fields: the number of
    # comma-separated fields in that line
    defs = [tf.constant(np.nan)] * n_fields  # n_fields float32 defaults
    parsed_fields = tf.io.decode_csv(line, record_defaults=defs)
    # parsed_fields holds the separated fields, each one a tensor;
    # there are 9 of them: the first 8 are x, the last one is y.
    # Stack the first 8 into one vector and the last one into another.
    x = tf.stack(parsed_fields[0:-1])
    y = tf.stack(parsed_fields[-1:])
    return x, y
# test
print(parse_csv_line(b'-0.9,0.83,-0.1,-0.14,-0.4,-0.1,1.6,-0.74,1.1',
                     n_fields = 9))
"""output:
(<tf.Tensor: id=108, shape=(8,), dtype=float32, numpy=
 array([-0.9 ,  0.83, -0.1 , -0.14, -0.4 , -0.1 ,  1.6 , -0.74], dtype=float32)>,
 <tf.Tensor: id=109, shape=(1,), dtype=float32, numpy=array([1.1], dtype=float32)>)
"""
##################################################################################################
# Turn the csv files into a complete dataset:
# 1. filename -> dataset
# 2. read file -> dataset -> datasets -> merge
# 3. parse csv
def csv_reader_dataset(filenames, n_readers=5,
                       batch_size=32, n_parse_threads=5,
                       shuffle_buffer_size=10000):
    dataset = tf.data.Dataset.list_files(filenames)  # a dataset of the csv filename tensors
    dataset = dataset.repeat()  # repeat indefinitely; fit() is told how many steps make an epoch
    dataset = dataset.interleave(
        lambda filename: tf.data.TextLineDataset(filename).skip(1),
        cycle_length = n_readers)
    # go through every element of the filename dataset (every csv file),
    # read its lines, and merge the files into one dataset
    dataset = dataset.shuffle(shuffle_buffer_size)  # shuffle the examples
    # map: convert every csv line (a string) into (x, y) via parse_csv_line
    dataset = dataset.map(parse_csv_line,
                          num_parallel_calls=n_parse_threads)
    dataset = dataset.batch(batch_size)  # group every batch_size examples into one batch
    return dataset

train_set = csv_reader_dataset(train_filenames, batch_size=3)
for x_batch, y_batch in train_set.take(1):
    print("x:")
    pprint.pprint(x_batch)
    print("y:")
    pprint.pprint(y_batch)
"""output:x:<tf.Tensor: id=193, shape=(3, 8), dtype=float32, numpy = array([[-0.66722274, -0.04823952, 0.34529406, 0.53826684, 1.8521839 ,-0.06112538, -0.8417093 , 1.5204847 ],[ 0.8015443 , 0.27216142, -0.11624393, -0.20231152, -0.5430516 ,-0.02103962, -0.5897621 , -0.08241846],[ 0.09734604, 0.75276285, -0.20218964, -0.19547 , -0.40605137,0.00678553, -0.81371516, 0.6566148 ]], dtype=float32)>y:<tf.Tensor: id=194, shape=(3, 1), dtype=float32, numpy=array([[1.59 ],[3.226],[1.119]], dtype=float32)>"""##################################################################################################batch_size = 32
##################################################################################################
batch_size = 32
train_set = csv_reader_dataset(train_filenames, batch_size = batch_size)
valid_set = csv_reader_dataset(valid_filenames, batch_size = batch_size)
test_set = csv_reader_dataset(test_filenames, batch_size = batch_size)

model = keras.models.Sequential([
    keras.layers.Dense(30, activation='relu',
                       input_shape=[8]),  # a hidden layer with 30 units
    keras.layers.Dense(1),                # fully connected output layer producing one value
])

model.compile(loss="mean_squared_error", optimizer="sgd")
callbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-2)]

history = model.fit(train_set,
                    validation_data = valid_set,
                    # the dataset repeats forever, so fit is told how many batches make one epoch
                    steps_per_epoch = 11610 // batch_size,
                    validation_steps = 3870 // batch_size,
                    epochs = 100,
                    callbacks = callbacks)

model.evaluate(test_set, steps = 5160 // batch_size)
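# A possible follow-up (an added sketch; the file name and the use of predict are
# illustrative, not from the original): persist the trained model and run it on the
# test dataset built from the csv files.
model.save("california_housing_model.h5")   # hypothetical output path
y_pred = model.predict(test_set, steps = 5160 // batch_size)
print(y_pred.shape)   # (number_of_predicted_samples, 1)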