{"id":336,"date":"2020-08-08T13:52:33","date_gmt":"2020-08-08T04:52:33","guid":{"rendered":"http:\/\/cedartrees.co.kr\/?p=336"},"modified":"2021-04-03T19:16:50","modified_gmt":"2021-04-03T10:16:50","slug":"google-colab-gpu-text-classification","status":"publish","type":"post","link":"http:\/\/blog.cedartrees.co.kr\/index.php\/2020\/08\/08\/google-colab-gpu-text-classification\/","title":{"rendered":"Google Colab GPU Text-classification"},"content":{"rendered":"\n<p>Colaboratory(\ud639\uc740 Colab)\ub97c \uc0ac\uc6a9\ud558\uba74 \ube0c\ub77c\uc6b0\uc800\uc5d0\uc11c Python\uc744 \uc791\uc131\ud558\uace0 \uc2e4\ud589\ud560 \uc218 \uc788\uc2b5\ub2c8\ub2e4. \uc7a5\uc810\uc774\ub77c\uba74 \ubcc4\ub3c4\uc758 \uad6c\uc131\uc774 \ud544\uc694 \uc5c6\uace0 \ubb34\ub8cc\ub85c GPU\ub97c \uc0ac\uc6a9\ud560 \uc218 \uc788\ub2e4\ub294 \uc7a5\uc810\uc774 \uc788\uc2b5\ub2c8\ub2e4. \ub610 \ub9cc\ub4e0 \ucf54\ub4dc\ub97c \uac04\ub2e8\ud558\uac8c \uacf5\uc720\ud560 \uc218\ub3c4 \uc788\uc2b5\ub2c8\ub2e4.<\/p>\n\n\n\n<figure class=\"wp-block-embed-youtube wp-block-embed is-type-video is-provider-youtube wp-embed-aspect-16-9 wp-has-aspect-ratio\"><div class=\"wp-block-embed__wrapper\">\n<iframe loading=\"lazy\" title=\"Get started with Google Colaboratory (Coding TensorFlow)\" width=\"525\" height=\"295\" src=\"https:\/\/www.youtube.com\/embed\/inN8seMm7UI?feature=oembed\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen><\/iframe>\n<\/div><\/figure>\n\n\n\n<p>\uac10\uc131\ubd84\uc11d(Text Classification)\uc5d0 \uc0ac\uc6a9\ud55c \ub370\uc774\ud130\ub294 \ub124\uc774\ubc84\uc5d0\uc11c \uacf5\uac1c\ud55c \uc601\ud654 \ud3c9\uc810 \uc815\ubcf4\uc785\ub2c8\ub2e4. 
\ud574\ub2f9 \ub370\uc774\ud130\ub294 \uc544\ub798 \ub9c1\ud06c\uc5d0\uc11c \ubc1b\uc744 \uc218 \uc788\uc2b5\ub2c8\ub2e4.<br><a href=\"https:\/\/github.com\/e9t\/nsmc\">https:\/\/github.com\/e9t\/nsmc<\/a><\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport io<\/pre>\n\n\n\n<p>\ubcf8 \uc608\uc81c\uc5d0\uc11c \uc0ac\uc6a9\ud560 konlpy\ub97c Colab\uc5d0 \uc124\uce58\ud569\ub2c8\ub2e4.<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">!pip install konlpy<\/pre>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">from konlpy.tag import Okt\nokt = Okt()<\/pre>\n\n\n\n<p>Colab\uc5d0\uc11c \uc0ac\uc6a9\ud560 \ud30c\uc77c\uc744 \uc0ac\uc6a9\uc790 \uacc4\uc815\uc758 \uad6c\uae00 \ub4dc\ub77c\uc774\ube0c\uc5d0 \uc5c5\ub85c\ub4dc\ud569\ub2c8\ub2e4. \uadf8\ub9ac\uace0 \uc5c5\ub85c\ub4dc\ud55c \ud30c\uc77c \uc815\ubcf4\ub97c Colab\uc5d0\uc11c \uc77d\uc744 \uc218 \uc788\ub3c4\ub85d \ud544\uc694\ud55c python \ub77c\uc774\ube0c\ub7ec\ub9ac\ub97c \ub4f1\ub85d\ud574\uc57c\ud569\ub2c8\ub2e4. 
\uc544\ub798 \ucf54\ub4dc\ub97c \uc2e4\ud589\ud558\uba74 \uad6c\uae00 \uacc4\uc815\uc5d0 \uc778\uc99d\ud560 \uc218 \uc788\ub294 \uc815\ubcf4\uac00 \ub098\uc624\uace0 \ud0a4\uac12\uc744 \uc785\ub825\ud558\uba74 \ub4dc\ub77c\uc774\ube0c\uc5d0 \uc811\uadfc\ud560 \uc218 \uc788\uc2b5\ub2c8\ub2e4.<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">from google.colab import drive\ndrive.mount('\/content\/gdrive')<\/pre>\n\n\n\n<p>\uad6c\uae00 \ub4dc\ub77c\uc774\ube0c\uc5d0 \uc811\uadfc\uc774 \uc644\ub8cc\ub418\uba74 \ud30c\uc77c\uc774 \uc788\ub294 \ub514\ub809\ud1a0\ub9ac \uc704\uce58\ub85c \uc774\ub3d9\ud569\ub2c8\ub2e4. <\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">from google.colab import drive\ndrive.mount('\/content\/gdrive')\n%cd gdrive\/My\\ Drive\/Colab\\ Notebooks<\/pre>\n\n\n\n<p>\ud574\ub2f9 \uc704\uce58\ub85c \uc774\ub3d9\ud55c \ud6c4\uc5d0 %ls \uba85\ub839\uc744 \uc2e4\ud589\uc2dc\ucf1c\ubcf4\uba74 \ud574\ub2f9 \ud3f4\ub354\uc5d0 \uc788\ub294 \ud30c\uc77c \ub9ac\uc2a4\ud2b8\ub97c \ud45c\uc2dc\ud574\uc90d\ub2c8\ub2e4. 
\ud30c\uc77c \uc911\uc5d0\uc11c \ud559\uc2b5\uc5d0 \uc0ac\uc6a9\ud560 \ud30c\uc77c\uc744 open\ud558\uba74 \ub429\ub2c8\ub2e4.<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">%ls<\/pre>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">def read_data(filename):\n    with io.open(filename, 'r',encoding='utf-8') as f:\n        data = [line for line in f.read().splitlines()]\n        data = data[1:]\n    return data \n\nsentences = []\n# \ud14c\uc2a4\ud2b8\ub97c \uc704\ud574 \uae38\uc774\uac00 30 \uc774\ud558\uc778 \ubb38\uc7a5\uc744 \uc77d\uc74c\nfor sentence in read_data('.\/ratings_test.txt'):\n  if len(sentence) &lt;= 30:\n    sentences.append(sentence)<\/pre>\n\n\n\n<p>\ud574\ub2f9 \ud30c\uc77c\uc744 \uc77d\uc5b4\ubcf4\uba74 \uc544\ub798\uc640 \uac19\uc740 \ud615\ud0dc\ub85c \ub370\uc774\ud130\uac00 \uad6c\uc131\ub418\uc5b4 \uc788\uc2b5\ub2c8\ub2e4. 
\uc774\uc804 \uc608\uc81c\uc5d0\uc11c \uc124\uba85\ud588\ub358 \uac83\ucc98\ub7fc 1\uc740 \uae0d\uc815\uc801\uc778 \ub2f5\ubcc0\uc744 0\uc740 \ubd80\uc815\uc801\uc778 \ub2f5\ubcc0\uc744 \uc758\ubbf8\ud569\ub2c8\ub2e4.<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">['6270596|\uad73 \u314b|1',\n '7898805|\uc74c\uc545\uc774 \uc8fc\uac00 \ub41c, \ucd5c\uace0\uc758 \uc74c\uc545\uc601\ud654|1',\n '6315043|\uc9c4\uc815\ud55c \uc4f0\ub808\uae30|0',\n '7462111|\uad1c\ucc2e\ub124\uc694\uc624\ub79c\ub9cc\ud3ec\ucf13\ubaac\uc2a4\ud130\uc7bc\ubc0c\uc5b4\uc694|1',\n '10268521|\uc18c\uc704 \u3148\ubb38\uac00\ub77c\ub294 \ud3c9\uc810\uc740 \ubb50\ub0d0?|1' ...<\/pre>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">class Vocab():\n    def __init__(self):\n        self.vocab2index = {'&lt;pad>':0,'&lt;unk>':1} # padding 0, unknown 1\n        self.index2vocab = {0:'&lt;pad>',1:'&lt;unk>'} # 0 padding, 1 unknown\n        self.vocab_count = {}\n        self.n_vocab = len(self.vocab2index)\n        \n    def add_vocab(self, sentence):\n        for word in sentence:\n            if word not in self.vocab2index:\n                self.vocab2index[word] = self.n_vocab\n                self.index2vocab[self.n_vocab] = word\n                self.vocab_count[word] = 1\n                self.n_vocab += 1\n            else:\n                self.vocab_count[word] += 1\n\nvo = Vocab()\n\ndef charStrip(s):\n    s = s.replace('\"','').replace('\u300c','').replace('\u300d','').replace('\u201c','').replace('?','').replace('\u201d','')\n    s = s.replace('(',' ').replace(')',' 
').replace('\u2018','').replace('\u2019','').replace('\u25a1','').replace('\u25c6','').replace('\u25c7','')\n    s = s.replace('[',' ').replace(']',' ').replace('\u25cb','').replace('\u25b3','').replace('\u25ce','').replace('\u25a3','').replace('\u25c7','')\n    s = s.replace('.',' ').replace('*',' ').replace('.',' ').replace('~',' ')\n    \n    return s\n\nx = [] # text sentence\ny = [] # label\nfor sentence in sentences:\n    arr = sentence.split('|')\n    if(len(arr) == 3):\n      sentence = okt.morphs(charStrip(arr[1]))\n      \n      vo.add_vocab(sentence)\n      x.append(sentence) \n      y.append(float(arr[2])) <\/pre>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">MAX_SEQUENCE_LENGTH = 0\n\nfor sentence in x:\n    if MAX_SEQUENCE_LENGTH &lt; len(sentence): MAX_SEQUENCE_LENGTH = len(sentence)\n\nMAX_SEQUENCE_LENGTH<\/pre>\n\n\n\n<p>\ub370\uc774\ud130 \uc911\uc5d0\uc11c \uac00\uc7a5 \uae34 \ubb38\uc7a5\uc744 \ud655\uc778\ud574\ubd05\ub2c8\ub2e4. \uc774 \ubb38\uc7a5\uc758 \ud06c\uae30\uac00 Sequence Length\uac00 \ub429\ub2c8\ub2e4. \uc774 \ubb38\uc7a5\uc758 \uae38\uc774\ubcf4\ub2e4 \uc791\uc740 \ubb38\uc7a5\uc758 \uacbd\uc6b0 \ube48\uce78\uc740 &lt;pad> \uac12\uc73c\ub85c \ucc44\uc6cc\uc90d\ub2c8\ub2e4. 
<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">def tensorize(vocab, sentence):\n    idx = [vocab.vocab2index[word] for word in sentence]\n    #return torch.Tensor(idx).long().item()\n    return idx\n\ntmp_tensor = []\nfor sentence in x:\n    tmp = tensorize(vo, sentence)\n    tmp_zero = np.zeros(MAX_SEQUENCE_LENGTH)\n    \n    for i,val in enumerate(tmp):\n        tmp_zero[i] = val\n        \n    tmp_tensor.append(tmp_zero)\n    \nx_data = torch.Tensor(tmp_tensor).long()\ny_data = torch.Tensor([float(t) for t in y])<\/pre>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint(device)\noutput : device(type='cuda')<\/pre>\n\n\n\n<p>\uad6c\uae00 Colab\uc5d0\uc11c\ub294 GPU\ub97c \uc0ac\uc6a9\ud560 \uc218 \uc788\uae30 \ub54c\ubb38\uc5d0 \ud544\uc694\ud55c \uc124\uc815\uc744 \ud574\uc90d\ub2c8\ub2e4. CPU\ub85c \uc5f0\uc0b0\ud560 \ub54c\ubcf4\ub2e4 \ud6e8\uc52c \ube60\ub978 \uacc4\uc0b0\uc18d\ub3c4\ub97c \ubcf4\uc5ec\uc90d\ub2c8\ub2e4. <\/p>\n\n\n\n<p>\ud559\uc2b5\uc6a9 \ub370\uc774\ud130\ub294 8\/2\ub85c \ud559\uc2b5\uc6a9 Train Data \/ Valid Data \ub370\uc774\ud130\ub85c \ub098\ub220\uc90d\ub2c8\ub2e4. 
<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">DATA_LENGTH = x_data.size(0)\n\ntrain_cnt = int(DATA_LENGTH*.8)\nvalid_cnt = DATA_LENGTH - train_cnt\nprint('train_cnt, valid_cnt = ',train_cnt,valid_cnt)\n\nidx = torch.randperm(DATA_LENGTH)\nx_train = torch.index_select(x_data, dim=0, index=idx).to(device).split([train_cnt, valid_cnt], dim=0)\ny_train = torch.index_select(y_data, dim=0, index=idx).to(device).split([train_cnt, valid_cnt], dim=0)<\/pre>\n\n\n\n<p>\uac01 \ub370\uc774\ud130\uc14b\uc740 pytorch\uc758 Dataset, DataLoader\ub97c \uc0ac\uc6a9\ud574\uc11c \ubc30\uce58\uc0ac\uc774\uc988\ub85c \ub098\ub220\uc90d\ub2c8\ub2e4. \ud559\uc2b5\ud560 \ub370\uc774\ud130\uac00 \ub9ce\uc740 \uacbd\uc6b0\uc5d0 \ub9ce\uc740 \ub370\uc774\ud130\ub97c \ud55c\ubc88\uc5d0 \uc77d\uc73c\uba74 \uba54\ubaa8\ub9ac \ubd80\uc871\ud604\uc0c1\uc774 \ubc1c\uc0dd\ud558\ub294\ub370 Dataset\uc744 \ubc30\uce58\uc0ac\uc774\uc988\ub85c \ubd84\ub9ac\ud574\uc11c \ub85c\ub529\ud558\uba74 \uba54\ubaa8\ub9ac\ub97c \uc801\uac8c \uc0ac\uc6a9\ud558\uac8c \ub418\uc5b4 \ud070 \ub370\uc774\ud130\ub3c4 \ud559\uc2b5\ud560 \uc218 \uc788\uc2b5\ub2c8\ub2e4.<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">from torch.utils.data import Dataset, DataLoader\n\nclass SimpanDataset(Dataset):\n    \n    def __init__(self, data, label):\n        super().__init__()\n        self.data = data\n        self.labels = label\n        \n    def __len__(self):\n        return len(self.data)\n    \n    def __getitem__(self, idx):\n        return self.data[idx], self.labels[idx]\n    \n# train_loader\ntrain_loader = 
DataLoader(dataset=SimpanDataset(x_train[0], y_train[0]), batch_size=250, shuffle=True)\nvalid_loader = DataLoader(dataset=SimpanDataset(x_train[1], y_train[1]), batch_size=250, shuffle=False)<\/pre>\n\n\n\n<p>\ud559\uc2b5\uc6a9 \ubaa8\ub378\uc744 \uc544\ub798\uc640 \uac19\uc774 \uc124\uc815\ud569\ub2c8\ub2e4. \ud559\uc2b5 \ubaa8\ub378\uc740 Embedding -> BiLSTM -> Softmax \ub808\uc774\uc5b4\ub97c \ud1b5\uacfc\ud558\uba74\uc11c \ucd5c\uc885 output\uc744 \ub9cc\ub4e4\uc5b4\ub0c5\ub2c8\ub2e4. <br>\ub2e8, output\uc740 \ubaa8\ub4e0 Sequence\uc758 \ub370\uc774\ud130\ub97c \uc0ac\uc6a9\ud558\uc9c0 \uc54a\uace0 \ub9c8\uc9c0\ub9c9 \uc2dc\ud000\uc2a4\uc758 \uac12\ub9cc \uc0ac\uc6a9\ud558\uba70 LSTM\uc758 \ubaa8\ub378\uc5d0\uc11c bidirectional\uc744 True\ub85c \uc124\uc815\ud588\uae30 \ub54c\ubb38\uc5d0 output*2\uc758 \uac12\uc774 \ub9ac\ud134\ub429\ub2c8\ub2e4. \ud559\uc2b5\uc5d0 \ucd5c\uc885 \uacb0\uacfc\ubb3c\uc740 0\uacfc 1\uc774\uae30 \ub54c\ubb38\uc5d0 hidden_size\ub294 2\uc785\ub2c8\ub2e4.<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">class SimpanClassificationModel(nn.Module):\n    \n    def __init__(self, input_size, hidden_size):\n        super().__init__()\n        \n        self.embedding = nn.Embedding(input_size, 300)\n        \n        self.rnn = nn.LSTM(input_size=300, hidden_size=100, num_layers=4, batch_first=True, bidirectional=True)\n        \n        self.layers = nn.Sequential(\n            nn.ReLU(),\n            nn.Linear(100*2,100),\n            nn.Linear(100,30),\n            \n            nn.Linear(30, hidden_size),\n        )\n        \n        self.softmax = nn.Softmax(dim=-1)\n        \n    def forward(self, x):\n        y = self.embedding(x)\n        y,_ = self.rnn(y)\n        y = self.layers(y)\n        \n        return self.softmax(y[:,-1,:])\n 
   \ninput_size = vo.n_vocab\nhidden_size = torch.unique(y_train[1]).size(dim=-1)\n\nmodel = SimpanClassificationModel(input_size, hidden_size)\nmodel = model.cuda()<\/pre>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\"># loss &amp; optimizer setting\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters())\n\nhist_loss = []\nhist_accr = []\n\nepochs = 501\n# start training\nmodel.train()\n\nfor epoch in range(epochs):\n    epoch_loss = 0\n    for x,y in train_loader:\n      x, y = x.to(device), y.to(device)\n      output = model(x)\n      loss = criterion(output, y.long())\n        \n      optimizer.zero_grad()\n      loss.backward()\n      optimizer.step()\n        \n      epoch_loss += loss.item()\n      accuracy = (torch.argmax(output, dim=-1) == y).float().mean().item()\n    \n      \n    hist_loss.append(epoch_loss)\n    hist_accr.append(accuracy)\n        \n    print('Cost: {:.6f}, Accuracy : {:.6f}'.format(loss.item(),accuracy))\n    print('--{}--'.format(epoch))<\/pre>\n\n\n\n<p>\ud559\uc2b5\uc758 \uc9c4\ud589\uc0c1\ud669\uc744 \uae30\ub85d\ud558\uae30 \uc704\ud574\uc11c 2\uac1c\uc758 \ubc30\uc5f4(hist_loss, hist_accr)\uc744 \uc0ac\uc6a9\ud569\ub2c8\ub2e4.<\/p>\n\n\n\n<p>hist_loss\ub294 loss\uac12\uc758 \ubcc0\ud654\ub97c \uae30\ub85d\ud558\ub294 \ubc30\uc5f4\uc774\uba70 hist_accr\uc740 \ud574\ub2f9 \ubaa8\ub378\uc758 \uc815\ud655\ub3c4 \uc815\ubcf4\ub97c \uc5bb\uae30 \uc704\ud574\uc11c \ub9cc\ub4e0 \ubc30\uc5f4\uc785\ub2c8\ub2e4. 
\ud559\uc2b5\uc774 \uc9c4\ud589\ub418\uba70 \ud574\ub2f9 \ubc30\uc5f4\uc5d0 \ub370\uc774\ud130\uac00 \uae30\ub85d\ub418\uace0 matplotlib.pyplot\uc744 \uc0ac\uc6a9\ud574\uc11c \uadf8\ub798\ud504\ub97c \uadf8\ub824\ubd05\ub2c8\ub2e4.<\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">import matplotlib.pyplot as plt\n\nfig, ax = plt.subplots(2,1)\nfig.set_size_inches((12, 8)) \n\nax[0].set_title('Loss')\nax[0].plot(hist_loss, color='red')\nax[0].set_ylabel('Loss')\nax[1].set_title('Accuracy')\nax[1].plot(hist_accr, color='blue')\nax[1].set_ylabel('Accuracy')\nax[1].set_xlabel('Epochs')\n\nplt.show()<\/pre>\n\n\n\n<figure class=\"wp-block-image size-large\"><img loading=\"lazy\" width=\"727\" height=\"496\" src=\"http:\/\/cedartrees.co.kr\/wp-content\/uploads\/2020\/08\/text-classification-torch-gpu-loss-accr.png\" alt=\"\" class=\"wp-image-344\" srcset=\"http:\/\/blog.cedartrees.co.kr\/wp-content\/uploads\/2020\/08\/text-classification-torch-gpu-loss-accr.png 727w, http:\/\/blog.cedartrees.co.kr\/wp-content\/uploads\/2020\/08\/text-classification-torch-gpu-loss-accr-300x205.png 300w\" sizes=\"(max-width: 727px) 100vw, 727px\" \/><\/figure>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">model.eval()\n    \nfor xv, yv in valid_loader:\n    output = model(xv)\n    \n    accuracy = (torch.argmax(output, dim=-1) == yv).float().mean().item()\n\n    hist_loss.append(loss.item())\n    hist_accr.append(accuracy)\n\n    print('Accuracy : {:.6f}'.format(accuracy))<\/pre>\n\n\n\n<p>\ubaa8\ub378\uc758 \ud559\uc2b5\uc774 \uc644\ub8cc\ub41c \ud6c4 valid data\ub97c \ud1b5\ud574\uc11c \ud559\uc2b5 
\ubaa8\ub378\uc758 \uc815\ud655\ub3c4\ub97c \uc54c\uc544\ubd05\ub2c8\ub2e4. <\/p>\n\n\n\n<pre class=\"EnlighterJSRAW\" data-enlighter-language=\"generic\" data-enlighter-theme=\"\" data-enlighter-highlight=\"\" data-enlighter-linenumbers=\"\" data-enlighter-lineoffset=\"\" data-enlighter-title=\"\" data-enlighter-group=\"\">Accuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667\nAccuracy : 0.916667<\/pre>\n\n\n\n<p><\/p>\n","protected":false},"excerpt":{"rendered":"<p>Colaboratory(\ud639\uc740 Colab)\ub97c \uc0ac\uc6a9\ud558\uba74 \ube0c\ub77c\uc6b0\uc800\uc5d0\uc11c Python\uc744 \uc791\uc131\ud558\uace0 \uc2e4\ud589\ud560 \uc218 \uc788\uc2b5\ub2c8\ub2e4. \uc7a5\uc810\uc774\ub77c\uba74 \ubcc4\ub3c4\uc758 \uad6c\uc131\uc774 \ud544\uc694 \uc5c6\uace0 \ubb34\ub8cc\ub85c GPU\ub97c \uc0ac\uc6a9\ud560 \uc218 \uc788\ub2e4\ub294 \uc7a5\uc810\uc774 \uc788\uc2b5\ub2c8\ub2e4. \ub610 \ub9cc\ub4e0 \ucf54\ub4dc\ub97c \uac04\ub2e8\ud558\uac8c \uacf5\uc720\ud560 \uc218\ub3c4 \uc788\uc2b5\ub2c8\ub2e4. \uac10\uc131\ubd84\uc11d(Text Classification)\uc5d0 \uc0ac\uc6a9\ud55c \ub370\uc774\ud130\ub294 \ub124\uc774\ubc84\uc5d0\uc11c \uacf5\uac1c\ud55c \uc601\ud654 \ud3c9\uc810 \uc815\ubcf4\uc785\ub2c8\ub2e4. \ud574\ub2f9 \ub370\uc774\ud130\ub294 \uc544\ub798 \ub9c1\ud06c\uc5d0\uc11c \ubc1b\uc744 \uc218 \uc788\uc2b5\ub2c8\ub2e4.https:\/\/github.com\/e9t\/nsmc \ubcf8 \uc608\uc81c\uc5d0\uc11c \uc0ac\uc6a9\ud560 konlpy\ub97c Colab\uc5d0 \uc124\uce58\ud569\ub2c8\ub2e4. 
Colab\uc5d0\uc11c \uc0ac\uc6a9\ud560 \ud30c\uc77c\uc744 \uc0ac\uc6a9\uc790 \uacc4\uc815\uc758 &hellip; <\/p>\n<p class=\"link-more\"><a href=\"http:\/\/blog.cedartrees.co.kr\/index.php\/2020\/08\/08\/google-colab-gpu-text-classification\/\" class=\"more-link\">\ub354 \ubcf4\uae30<span class=\"screen-reader-text\"> &#8220;Google Colab GPU Text-classification&#8221;<\/span><\/a><\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":[],"categories":[34,1],"tags":[79,124,123,6,61,56],"_links":{"self":[{"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/posts\/336"}],"collection":[{"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/comments?post=336"}],"version-history":[{"count":6,"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/posts\/336\/revisions"}],"predecessor-version":[{"id":345,"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/posts\/336\/revisions\/345"}],"wp:attachment":[{"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/media?parent=336"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/categories?post=336"},{"taxonomy":"post_tag","embeddable":true,"href":"http:\/\/blog.cedartrees.co.kr\/index.php\/wp-json\/wp\/v2\/tags?post=336"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}