[root@localhost cc]# cat 2.txt
adc 3 5
a d a
a 3 adf
a d b
a 3 adf

De-duplicate rows whose first column is repeated:

[root@localhost cc]# cat 2.txt |awk '!a[$1]++{print}'
adc 3 5
a d a

For repeated keys, the topmost record is kept.

De-duplicate rows whose first and second columns are both repeated:

[root@localhost cc]# cat 2.txt |awk '!a[$1" "$2]++{print}'
adc 3 5
a d a
a 3 adf
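Why the one-liner works: awk evaluates a[$1]++ to the count currently stored for the key $1 and only then increments it, so !a[$1]++ is true (and the line is printed) the first time a given first-column value appears and false for every later occurrence, which is why only the topmost record per key survives. Using a[$1" "$2] applies the same trick to the pair of columns.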
The same idea in Oracle SQL: split a comma-separated account_id list into rows and collapse the duplicates with DISTINCT. The source snippet is cut off in the middle of the REGEXP_SUBSTR call, so everything after the '[^,]+' pattern below is a reconstruction of the usual CONNECT BY split plus LISTAGG re-join, not text from the original.

create or replace FUNCTION "SF_SPLIT_ACCOUNT_ID_LIST" (account_id_list IN VARCHAR2)
RETURN VARCHAR2 AS
  v_account_id_list VARCHAR2(5000);
BEGIN
  WITH ACCTS AS (
    SELECT DISTINCT str FROM (
      -- one row per comma-separated token; the original snippet breaks off after
      -- the '[^,]+' pattern, so the rest of the function body is reconstructed
      SELECT REGEXP_SUBSTR(replace(account_id_list, ', ', ','), '[^,]+', 1, LEVEL) str
        FROM dual
        CONNECT BY REGEXP_SUBSTR(replace(account_id_list, ', ', ','), '[^,]+', 1, LEVEL) IS NOT NULL))
  SELECT LISTAGG(str, ',') WITHIN GROUP (ORDER BY str) INTO v_account_id_list FROM ACCTS;
  RETURN v_account_id_list;
END;
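Assuming the reconstructed body above, a call such as SELECT SF_SPLIT_ACCOUNT_ID_LIST('3, 1, 3, 2') FROM dual would return '1,2,3': the list is split on commas, duplicates are collapsed by DISTINCT, and the surviving values are re-joined in sorted order.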
#-----------awk.awk------------
{ if (data[$0]++ == 0) lines[++count] = $0 }
END { for (i = 1; i <= count; i++) print lines[i] }

The script is used like this:

awk -f awk.awk text_to_process

cat text_to_process:
jia
yang
lu
yang

The result after processing is:

jia
yang
lu

Using a set in C++
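The original text breaks off right after introducing the C++ set, so what follows is only a minimal sketch of the same idea, assuming the goal is whole-line, first-occurrence de-duplication like awk.awk above; the file name dedup.cpp and the compile/run line are illustrative, not from the source.

// dedup.cpp -- print each input line only the first time it appears
#include <iostream>
#include <set>
#include <string>

int main() {
    std::set<std::string> seen;         // lines that have already been printed
    std::string line;
    while (std::getline(std::cin, line)) {
        if (seen.insert(line).second)   // insert() reports whether the line was new
            std::cout << line << '\n';  // first occurrences only, in input order
    }
    return 0;
}

Compiled and run as, say, g++ -o dedup dedup.cpp && ./dedup < text_to_process, it would print jia, yang, lu for the sample file above; the set is used only for membership tests, so the input order is preserved just as with the awk version.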