Naive Bayes classification: the multinomial model cannot handle negative feature values, and when I ran into this I simply stripped the minus signs from the original data, which corrupted the features and made the predictions fail.
Advantages: still effective with small amounts of data, and it handles multi-class problems.
Disadvantages: sensitive to how the input data is prepared.
Applicable data types: nominal (categorical) data.
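Rather than deleting minus signs (which changes what the sensor readings mean), the usual fix is to rescale every feature into [0, 1] before fitting the multinomial model. A minimal sketch, assuming the assembled features/label DataFrame label_features built later in this post:
# rescale the assembled feature vectors into [0, 1] so the multinomial model accepts them
from pyspark.ml.feature import MinMaxScaler
scaler = MinMaxScaler(inputCol="features", outputCol="scaled_features")
scaled = scaler.fit(label_features).transform(label_features)
# then train on "scaled_features" instead of "features"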
#1. Imports
from pyspark.sql import SparkSession
from pyspark.sql.types import FloatType
from pyspark.ml.classification import NaiveBayes
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import VectorAssembler
#2. Read the file from HDFS
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
train_data = sc.textFile("hdfs://master:9000/qw.csv")
def GetParts(line):
    parts = line.split(',')
    return parts[0], parts[1], parts[2], parts[3], parts[4], parts[5], parts[6]
header = train_data.first()  # grab the header row
train_data = train_data.filter(lambda row: row != header)  # drop the header row
train = train_data.map(lambda line: GetParts(line))
df = spark.createDataFrame(train, ["acceleration_x","acceleration_y","acceleration_z","gyro_x","gyro_y","gyro_z","activity"])  # convert to a DataFrame
df.show()
# cast the string columns to floats
for col_name in ["acceleration_x", "acceleration_y", "acceleration_z", "gyro_x", "gyro_y", "gyro_z", "activity"]:
    df = df.withColumn(col_name, df[col_name].cast(FloatType()))
# assemble the feature columns into one vector and keep the label
assembler = VectorAssembler(inputCols=["acceleration_x","acceleration_y","acceleration_z","gyro_x","gyro_y","gyro_z"],outputCol="features")
output = assembler.transform(df)
label_features = output.select("features", "activity").toDF('features','label')
label_features.show(truncate=False)
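The post goes on to test the model on a single hand-made row; if you would rather score it on a real held-out split, randomSplit does the job. A sketch (the 70/30 ratio and seed are arbitrary choices, not from the original):
# hold out 30% of the labelled data for evaluation
train_set, test_set = label_features.randomSplit([0.7, 0.3], seed=42)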
# Naive Bayes
nb = NaiveBayes(smoothing=1.0, modelType="multinomial")
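It is the multinomial model type that rejects negative feature values at fit() time. If your cluster runs Spark 3.0 or later, an alternative to rescaling is the Gaussian variant, which models continuous features directly and accepts negative values:
# alternative for Spark >= 3.0: gaussian Naive Bayes accepts negative continuous features
# nb = NaiveBayes(modelType="gaussian")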
# train the model
model = nb.fit(label_features)
# build a one-row test DataFrame with the same columns as the training data
df1 = spark.createDataFrame([(-1.0602, -0.282, -0.0618, 0.8069, -0.9107, 1.6153, 1)], ["acceleration_x","acceleration_y","acceleration_z","gyro_x","gyro_y","gyro_z","activity"])
df1.show()
test_assembler = VectorAssembler(inputCols=["acceleration_x","acceleration_y","acceleration_z","gyro_x","gyro_y","gyro_z"],outputCol="features")
test_output = test_assembler.transform(df1)
test_label_features = test_output.select("features", "activity").toDF('features','label')
test_label_features.show(truncate=False)
# inspect the model's predictions on the test row
result = model.transform(test_label_features)
print(result.collect())
predictionAndLabels = result.select("prediction", "label").collect()
print(predictionAndLabels)
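MulticlassClassificationEvaluator is imported at the top but never used; to turn the predictions into an actual accuracy number, a sketch (with a one-row test set the result can only be 0.0 or 1.0, so this is mainly useful on a real held-out split):
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(result)
print("Test set accuracy = " + str(accuracy))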
Summary: multinomial Naive Bayes in Spark ML only accepts non-negative feature values, so rescale the features (or switch the model type) instead of deleting minus signs from the raw data.