java 兩個csv文件數據去重


1.pom.xml配置

<dependency>
      <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
       <version>2.4</version>
</dependency>

2.實現

package com.tangxin.kafka.service;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.LineIterator;
import org.springframework.util.StringUtils;

import java.io.*;
import java.math.BigDecimal;
import java.util.*;

/**
 * 兩個csv文件數據去重
 */
public class CSVDeduplication {

    /** Directory that holds the input CSV files and receives the output CSV files. */
    private static final String CSV_PATH = "I:\\";

    /**
     * Reads the first column ("id") of every data row in the given CSV file.
     * Surrounding double quotes and whitespace are stripped from each value.
     *
     * @param path absolute path of the CSV file to read
     * @return the ids in file order; empty if the file cannot be read
     */
    public static List<String> ids(String path) {
        List<String> result = new ArrayList<>();
        // try-with-resources guarantees the reader is closed even on failure
        // (the original used commons-io LineIterator and an empty catch block)
        try (BufferedReader reader = new BufferedReader(new FileReader(path))) {
            String line;
            while ((line = reader.readLine()) != null) {
                // NOTE(review): this skips ANY line containing "id", not just the
                // header row -- safe only while the ids themselves are numeric.
                if (line.trim().contains("id")) {
                    continue;
                }
                String[] arr = line.split(",");
                // first column is the id; drop the quotes added by the CSV writer
                result.add(arr[0].replaceAll("\"", "").trim());
            }
        } catch (IOException e) {
            // was silently swallowed before; at least report the failure
            e.printStackTrace();
        }
        return result;
    }

    public static void main(String[] args) throws Exception {
        String path1 = CSV_PATH + "100w.csv";
        String path2 = CSV_PATH + "300w.csv";

        // de-duplicate each file's ids individually, dropping blanks
        Set<String> idSet1 = toNonEmptySet(ids(path1));
        Set<String> idSet2 = toNonEmptySet(ids(path2));

        System.out.println("用戶100萬=" + idSet1.size());
        System.out.println("用戶300萬=" + idSet2.size());
        BigDecimal b1 = new BigDecimal(idSet1.size());
        BigDecimal b2 = new BigDecimal(idSet2.size());
        BigDecimal b3 = b1.add(b2);
        System.out.println("用戶100萬和用戶300萬=" + b3.toString());

        // union of both id sets = the de-duplicated total
        Set<String> ids3 = new HashSet<>(idSet1);
        ids3.addAll(idSet2);
        System.out.println("用戶100萬和用戶300萬去重=" + ids3.size());

        // BUG FIX: the original removed the union from the source lists
        // (ids1.removeAll(ids3)), which always empties them and reports 0.
        // The duplicates are the INTERSECTION of the two sets.
        Set<String> ids4 = new HashSet<>(idSet1);
        ids4.retainAll(idSet2);
        System.out.println("用戶100萬和用戶300萬重復=" + ids4.size());

        // carve off the first 50k ids for the initial push
        Set<String> fiveMillion = splitHeadData(ids3, 50000);

        System.out.println("5W用戶推送數據:" + fiveMillion.size());

        List<String> staffsList = new ArrayList<>(fiveMillion);

        createCSV(staffsList, "5w.csv");

        System.out.println("剩余推送總數:" + ids3.size());

        System.out.println("============剩余總數每50w分頁顯示=================");

        // write the remainder out in 500k-row pages
        List<List<String>> pageListTotal = pageList(ids3, 500000);

        for (int i = 0; i < pageListTotal.size(); i++) {
            List<String> items = pageListTotal.get(i);
            createCSV(items, "50w" + i + ".csv");
        }
    }

    /** Copies the non-empty values of {@code ids} into a new set (blanks dropped). */
    private static Set<String> toNonEmptySet(List<String> ids) {
        Set<String> set = new HashSet<>();
        for (String id : ids) {
            if (id != null && !id.isEmpty()) {
                set.add(id);
            }
        }
        return set;
    }

    /**
     * Removes up to {@code size} elements from {@code mySet} and returns them.
     * The source set is mutated: the returned elements are taken out of it.
     *
     * @param mySet source set (modified in place)
     * @param size  maximum number of elements to take
     * @return the removed head elements
     */
    public static Set<String> splitHeadData(Set<String> mySet, int size) {
        Set<String> result = new HashSet<>();
        Iterator<String> iterator = mySet.iterator();
        while (iterator.hasNext() && result.size() < size) {
            result.add(iterator.next());
            // remove via the iterator instead of a second removeAll() pass
            iterator.remove();
        }
        return result;
    }

    /**
     * Splits the ids into consecutive pages of at most {@code pageSize} elements.
     *
     * @param totalSet ids to paginate (iteration order is preserved)
     * @param pageSize maximum elements per page
     * @return list of pages; the last page may be shorter
     */
    public static List<List<String>> pageList(Set<String> totalSet, int pageSize) {
        List<List<String>> allIdList = new ArrayList<>();
        List<String> idList = new ArrayList<>();
        for (String id : totalSet) {
            // BUG FIX: the original tested "count > pageSize" AFTER incrementing,
            // which made every page pageSize + 1 elements long.
            if (idList.size() == pageSize) {
                allIdList.add(idList);
                idList = new ArrayList<>();
            }
            idList.add(id);
        }
        if (!idList.isEmpty()) {
            allIdList.add(idList);
        }
        return allIdList;
    }

    /**
     * Writes the ids to {@code CSV_PATH + fileName} as a one-column CSV with an
     * "id" header row. Every cell is double-quoted.
     *
     * @param list     ids to write, one per row
     * @param fileName output file name, created under {@link #CSV_PATH}
     */
    public static void createCSV(List<String> list, String fileName) {
        File csvFile = new File(CSV_PATH + fileName);
        File parent = csvFile.getParentFile();
        if (parent != null && !parent.exists()) {
            parent.mkdirs();
        }

        // GB2312 keeps the "," separator readable in Excel (as in the original).
        // try-with-resources replaces the old finally block, which could throw
        // NullPointerException when the writer failed to open.
        try (BufferedWriter csvWriter = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(csvFile), "GB2312"), 1024)) {
            // header row
            writeRow(Collections.<Object>singletonList("id"), csvWriter);
            // data rows: one id per row
            for (String value : list) {
                writeRow(Collections.<Object>singletonList(value), csvWriter);
            }
            csvWriter.flush();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Writes one CSV row: each cell double-quoted, cells separated by commas.
     * BUG FIX: the original appended "," after every cell, leaving a trailing
     * comma (an empty extra column) on each row.
     */
    private static void writeRow(List<Object> row, BufferedWriter csvWriter) throws IOException {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < row.size(); i++) {
            if (i > 0) {
                sb.append(',');
            }
            sb.append('"').append(row.get(i)).append('"');
        }
        csvWriter.write(sb.toString());
        csvWriter.newLine();
    }

}

3.開始的實現思路和后面的實現思路

3.1 開始的實現思路

     讀取文件1.csv,數據大概有100多萬條;讀取文件2.csv,數據大概有300多萬條。然後用100萬和300萬的數據一條條去比較,看哪些已經存在了。兩個for循環,100萬×300萬=3萬億次,程序卡着不動,只好放棄了。

     然后想着用多線程把300萬數據分頁成每50萬一份來跑,結果也還是跑得很慢。

 

3.2 后面的實現思路

 

      代碼就在上面。整體思路是通過Java的Set集合來去重,因為Java單次循環處理還是很快的。注意需要配置JVM參數來跑,不然會內存溢出:

VM options:

-Xms1g -Xmx1g -XX:SurvivorRatio=2 -XX:+UseParallelGC


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM