
Implementing the C4.5 decision tree algorithm in Python

Published: 2020-09-15 19:21:01  Source: 腳本之家  Views: 224  Author: 楊柳岸曉風  Category: Development

The C4.5 algorithm selects features by the information gain ratio instead of the information gain used in ID3, which overcomes information gain's tendency to favour features with many distinct values. The information gain ratio is defined as follows:

GainRatio(D, A) = Gain(D, A) / SplitInfo(D, A)

SplitInfo(D, A) = - Σ_v (|D_v| / |D|) * log2(|D_v| / |D|)

where Gain(D, A) is the ordinary (ID3) information gain of feature A, and D_v is the subset of samples in D on which A takes its v-th value.
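As a quick, self-contained illustration (not taken from the original article; the toy feature and class values below are invented), the following snippet computes the entropy, split information and gain ratio of a single feature column, mirroring what computeSplitInfo and getBestFeat do in the class below:

# Standalone sketch: gain ratio of one feature on a made-up toy data set.
import math

def entropy(labels):
    # Shannon entropy of a list of class labels
    n = float(len(labels))
    return -sum((labels.count(c) / n) * math.log(labels.count(c) / n, 2)
                for c in set(labels))

feature = ["youth", "youth", "middle", "senior", "senior"]  # one feature column
classes = ["no", "no", "yes", "yes", "no"]                  # class column

n = float(len(classes))
base_entropy = entropy(classes)

cond_entropy = 0.0  # conditional entropy of the class given the feature
split_info = 0.0    # split information of the feature
for v in set(feature):
    subset = [c for f, c in zip(feature, classes) if f == v]
    p = len(subset) / n
    cond_entropy += p * entropy(subset)
    split_info -= p * math.log(p, 2)

gain = base_entropy - cond_entropy  # ID3 information gain
gain_ratio = gain / split_info      # C4.5 gain ratio (the feature has more than one value here)
print(gain, split_info, gain_ratio)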

# -*- coding: utf-8 -*-


from numpy import *
import math
import copy
import pickle


class C45DTree(object):
 def __init__(self): # constructor
  self.tree = {} # the generated decision tree
  self.dataSet = [] # data set
  self.labels = [] # feature labels


 # Load the data set from a file
 def loadDataSet(self, path, labels):
  recordList = []
  fp = open(path, "r") # open the file in text mode and read its contents
  content = fp.read()
  fp.close()
  rowList = content.splitlines() # split the file contents into rows
  recordList = [row.split("\t") for row in rowList if row.strip()] # skip blank lines, split each row on tabs
  self.dataSet = recordList
  self.labels = labels


 # Train the decision tree
 def train(self):
  labels = copy.deepcopy(self.labels)
  self.tree = self.buildTree(self.dataSet, labels)


 # Build the decision tree: the main tree-construction routine
 def buildTree(self, dataSet, labels):
  cateList = [data[-1] for data in dataSet] # extract the class-label column of the data set
  # Stopping condition 1: if every sample has the same class label, stop splitting and return that label
  if cateList.count(cateList[0]) == len(cateList):
   return cateList[0]
  # Stopping condition 2: if no features are left (only the class column remains), return the majority class
  if len(dataSet[0]) == 1:
   return self.maxCate(cateList)
  # Core part: choose the best feature to split on
  bestFeat, featValueList = self.getBestFeat(dataSet) # index of the best feature and its value list
  bestFeatLabel = labels[bestFeat]
  tree = {bestFeatLabel: {}}
  del (labels[bestFeat])
  for value in featValueList: # grow the tree recursively
   subLabels = labels[:] # label list with the chosen feature removed, for the subtrees
   # split the data set on the best feature and this value
   splitDataset = self.splitDataSet(dataSet, bestFeat, value)
   subTree = self.buildTree(splitDataset, subLabels) # build the subtree
   tree[bestFeatLabel][value] = subTree
  return tree


 # Return the class label that occurs most often
 def maxCate(self, cateList):
  return max(set(cateList), key=cateList.count)


 # Choose the best feature (the one with the largest information gain ratio)
 def getBestFeat(self, dataSet):
  Num_Feats = len(dataSet[0][:-1])
  totality = len(dataSet)
  BaseEntropy = self.computeEntropy(dataSet)
  ConditionEntropy = []  # conditional entropy of each feature
  splitInfoList = [] # split information of each feature, needed for the C4.5 gain ratio
  allFeatVList = []
  for f in range(Num_Feats):
   featList = [example[f] for example in dataSet]
   [splitI, featureValueList] = self.computeSplitInfo(featList)
   allFeatVList.append(featureValueList)
   splitInfoList.append(splitI)
   resultGain = 0.0
   for value in featureValueList:
    subSet = self.splitDataSet(dataSet, f, value)
    appearNum = float(len(subSet))
    subEntropy = self.computeEntropy(subSet)
    resultGain += (appearNum/totality)*subEntropy
   ConditionEntropy.append(resultGain) # total conditional entropy for this feature
  infoGainArray = BaseEntropy*ones(Num_Feats)-array(ConditionEntropy) # information gain of each feature
  infoGainRatio = infoGainArray/array(splitInfoList) # C4.5: gain ratio = information gain / split information
  bestFeatureIndex = argsort(-infoGainRatio)[0]
  return bestFeatureIndex, allFeatVList[bestFeatureIndex]

 # Compute the split information of one feature column
 def computeSplitInfo(self, featureVList):
  numEntries = len(featureVList)
  featureValueSetList = list(set(featureVList))
  valueCounts = [featureVList.count(featVec) for featVec in featureValueSetList]
  pList = [float(item)/numEntries for item in valueCounts] # proportion of samples taking each value
  lList = [item*math.log(item, 2) for item in pList]
  splitInfo = -sum(lList) # split info = -sum(p * log2(p))
  return splitInfo, featureValueSetList


 # Compute the Shannon entropy of a data set
 def computeEntropy(self, dataSet):
  dataLen = float(len(dataSet))
  cateList = [data[-1] for data in dataSet] # class labels of the data set
  # dictionary mapping each class label to its number of occurrences
  items = dict([(i, cateList.count(i)) for i in cateList])
  infoEntropy = 0.0
  for key in items: # Shannon entropy: infoEntropy = -sum(prob * log2(prob))
   prob = float(items[key]) / dataLen
   infoEntropy -= prob * math.log(prob, 2)
  return infoEntropy


 # Split the data set: keep the rows whose value on the given feature equals value,
 # and remove that feature's column from the returned rows
 # dataSet: data set; axis: feature column index; value: feature value to match
 def splitDataSet(self, dataSet, axis, value):
  rtnList = []
  for featVec in dataSet:
   if featVec[axis] == value:
    rFeatVec = featVec[:axis] # elements before the feature column (indices 0 to axis-1)
    rFeatVec.extend(featVec[axis + 1:]) # append the elements after the feature column
    rtnList.append(rFeatVec)
  return rtnList

 # Save the decision tree to a file
 def storetree(self, inputTree, filename):
  fw = open(filename, 'wb') # pickle requires a binary file handle
  pickle.dump(inputTree, fw)
  fw.close()

 # Load a decision tree back from a file
 def grabTree(self, filename):
  fr = open(filename, 'rb')
  return pickle.load(fr)
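
The class above builds and persists the tree but does not include a prediction method. As a supplementary sketch (not part of the original code), one way to classify a single sample by walking the nested-dict tree could look like this; the names predict and sample are chosen here for illustration, and featLabels is assumed to be the same label list that was passed to loadDataSet:

# Supplementary sketch: classify one sample with the nested-dict tree built above.
def predict(tree, featLabels, sample):
    if not isinstance(tree, dict):             # reached a leaf: it is a class label
        return tree
    featName = list(tree.keys())[0]            # feature tested at this node
    featIndex = featLabels.index(featName)     # its column index in the sample
    branches = tree[featName]
    value = sample[featIndex]
    if value not in branches:                  # feature value never seen during training
        return None
    return predict(branches[value], featLabels, sample)

Called as predict(dtree.tree, ["age", "revenue", "student", "credit"], some_sample), it returns the class label the tree assigns to some_sample, or None for a feature value that has no matching branch.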

Calling code

# -*- coding: utf-8 -*-

from numpy import *
from C45DTree import *

dtree = C45DTree()
dtree.loadDataSet("dataset.dat",["age", "revenue", "student", "credit"])
dtree.train()

dtree.storetree(dtree.tree, "data.tree")
mytree = dtree.grabTree("data.tree")
print(mytree)
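
The contents of dataset.dat are not shown in the article. For reference, loadDataSet expects a plain-text file with one sample per line, columns separated by single tab characters, in the same order as the label list ("age", "revenue", "student", "credit"), with the class label as the last column. A file in that format might look like this (the values are purely illustrative):

youth	high	no	fair	no
youth	high	no	excellent	no
middle	high	no	fair	yes
senior	medium	no	fair	yes
senior	low	yes	excellent	no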

That is all for this article. Hopefully it is helpful for your studies, and please continue to support 億速雲.
