I tested it with a 9-field, 25 MB data file and it worked very well. It is not particularly fast, but it does not consume much memory either.

My compact, large-file-friendly alternative uses grep and cut. The only drawback: because of the for loop, it takes a long time on large files.

# Remove constant lines using grep
    $ grep -v "^[0 ]*$\|^[1 ]*$" $fIn > $fTmp

# Remove constant columns using cut and wc

    $ nc=`cat $fTmp | head -1 | wc -w` 
    $ listcol=""
    $ for (( i=1 ; i<=$nc ; i++ ))
    $ do
    $   nitem=`cut -d" " -f$i $fTmp | sort | uniq | wc -l`
    $   if [ $nitem -gt 1 ]; then listcol=$listcol","$i ;fi
    $ done
    $ listcol2=`echo $listcol | sed 's/^,//g'`
    $ cut -d" " -f$listcol2 $fTmp | sed 's/ //g' > $fOut
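For example, a sketch of running it end to end (the file names here are placeholders you would set yourself):

    $ fIn=data.txt    # input matrix (hypothetical name)
    $ fTmp=tmp.txt    # intermediate file, constant rows removed
    $ fOut=out.txt    # final result

Note that the trailing sed 's/ //g' also strips the field separators, so the output values run together; drop it if you want to keep the spaces.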

One way to check the rows is:

awk '/[^0[:blank:]]/' file

It simply tests whether a line contains any character other than 0 or blank, and prints the line if it does. (Deleting the zero rows in place can likewise be done with sed: sed -i '/^[0 ]\+$/d' file.)

If you also want to handle the columns, I suggest the two-pass awk shown further down this page, which reads the file twice: once to learn which columns are all-zero, and once to print without them.

For the five-column sample input used throughout this thread, the expected result is:
1 0 1 1 
1 1 1 1 
0 1 1 1 
1 1 0 0 
0 0 1 1 
To remove only the all-zero rows, a hard-coded pattern such as sed '/0 0 0 0/d' or egrep -v '^(0 0 0 0 )$' would do, but spelling out the pattern is impractical for rows with thousands of fields.
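An awk-only pipeline handles both directions without hard-coding the width: the first awk drops the all-zero rows, and the second buffers what is left and drops the all-zero columns: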
$ awk '{for (i=1; i<=NF; i++) {if ($i) {print; next}}}' file | awk '{l=NR; c=NF; for (i=1; i<=c; i++) {a[l,i]=$i; if ($i) e[i]++}} END{for (i=1; i<=l; i++) {for (j=1; j<=c; j++) {if (e[j]) printf "%d ",a[i,j] } printf "\n"}}'
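Stage one, the row filter, on its own: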
$ awk '{for (i=1; i<=NF; i++) {if ($i) {print; next}}}' file
1 0 1 1
1 0 1 0
1 0 0 1
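Stage two, the column filter, expanded for readability: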
$ awk '{l=NR; c=NF;                  # track row and column counts
  for (i=1; i<=c; i++) {
      a[l,i]=$i;                     # buffer the whole matrix
      if ($i) e[i]++                 # e[i] > 0 means column i has a 1 somewhere
  }}
  END{
    for (i=1; i<=l; i++){
      for (j=1; j<=c; j++)
        {if (e[j]) printf "%d ",a[i,j] }   # emit only the non-zero columns
      printf "\n"
    }
  }'
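A complete run on a sample file: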
$ cat a
1 0 1 0 1
0 0 0 0 0
1 1 1 0 1
0 1 1 0 1
1 1 0 0 0
0 0 0 0 0
0 0 1 0 1
$ awk '{for (i=1; i<=NF; i++) {if ($i) {print; next}}}' a | awk '{l=NR; c=NF; for (i=1; i<=c; i++) {a[l,i]=$i; if ($i) e[i]++}} END{for (i=1; i<=l; i++) {for (j=1; j<=c; j++) {if (e[j]) printf "%d ",a[i,j] } printf "\n"}}'
1 0 1 1 
1 1 1 1 
0 1 1 1 
1 1 0 0 
0 0 1 1 
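And on input that also has an all-zero column (the second):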
$ cat file 
1 0 1 1
0 0 0 0
1 0 1 0
0 0 0 0
1 0 0 1
$ awk '{for (i=1; i<=NF; i++) {if ($i) {print; next}}}' file | awk '{l=NR; c=NF; for (i=1; i<=c; i++) {a[l,i]=$i; if ($i) e[i]++}} END{for (i=1; i<=l; i++) {for (j=1; j<=c; j++) {if (e[j]) printf "%d ",a[i,j] } printf "\n"}}'
1 1 1 
1 1 0 
1 0 1 
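The row filter can also be written as a Perl one-liner: the first form deletes rows matching a fixed zero pattern, while the /1/ forms simply keep any row that contains a 1 (the double-quoted variant is for shells where single quotes are not available):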
perl -n -e '$_ !~ /0 0 0 0/ and print' data.txt
perl -n -e '/1/ and print' data.txt
perl -n -e "/1/ and print" data.txt
#!/usr/bin/perl
use warnings;
use strict;

my @nonzero;                                       # What columns where not zero.
my @output;                                        # The whole table for output.

while (<>) {
    next unless /1/;
    my @col = split;
    $col[$_] and $nonzero[$_] ||= 1 for 0 .. $#col;
    push @output, \@col;
}

my @columns = grep $nonzero[$_], 0 .. $#nonzero;   # What columns to output.
for my $line (@output) {
    print "@{$line}[@columns]\n";
}
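A sketch of running it, assuming the script is saved as strip_zeros.pl (a name I am making up) and the five-column sample is in file:

$ perl strip_zeros.pl file
1 0 1 1
1 1 1 1
0 1 1 1
1 1 0 0
0 0 1 1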
awk '{show=0; for (i=1; i<=NF; i++) {if ($i!=0) show=1; col[i]+=$i;}} show==1{tr++; for (i=1; i<=NF; i++) vals[tr,i]=$i; tc=NF} END{for(i=1; i<=tr; i++) { for (j=1; j<=tc; j++) { if (col[j]>0) printf("%s%s", vals[i,j], OFS)} print ""; } }' file
awk '{
   show=0;
   for (i=1; i<=NF; i++) {
      if ($i != 0)
         show=1;          # the row has at least one non-zero field
      col[i]+=$i;         # column sums: col[i] == 0 means column i is all-zero
   }
}
show==1 {
   tr++;                  # number of rows kept so far
   for (i=1; i<=NF; i++)
      vals[tr,i]=$i;      # buffer the kept rows
   tc=NF
}
END {
   for(i=1; i<=tr; i++) {
      for (j=1; j<=tc; j++) {
         if (col[j]>0)    # skip the all-zero columns
            printf("%s%s", vals[i,j], OFS)
      }
      print ""
   }
}' file
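Unlike the two-pass solution below, this reads the input only once, at the cost of holding the entire matrix in the vals array until END.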
awk '
    NR==1   {for (i=1; i<=NF; i++) if ($i == 0) zerocol[i]=1; next}     # seed candidates from row 1
    NR==FNR {for (idx in zerocol) if ($idx) delete zerocol[idx]; next}  # pass 1: drop any candidate that ever sees a 1
    {p=0; for (i=1; i<=NF; i++) if ($i) {p++; break}}                   # pass 2: p is set if the row is non-zero
    p {for (i=1; i<=NF; i++) if (!(i in zerocol)) printf "%s%s", $i, OFS; print ""}
' file file    # the file is named twice so awk reads it in two passes
1 0 1 1 
1 1 1 1 
0 1 1 1 
1 1 0 0 
0 0 1 1 
#! /usr/bin/env perl
#
use strict;
use warnings;
use autodie;
use feature qw(say);
use Data::Dumper;

my @array_of_columns;
for my $row ( <DATA> ) {
    chomp $row;
    next if $row =~ /^(0\s*)+$/;  #Skip zero rows;
    my @columns = split /\s+/, $row;
    for my $index ( (0..$#columns) ) {
        push @{ $array_of_columns[$index] }, $columns[$index];
    }
}

# Remove the columns that contain nothing but zeros;
for my $column ( (0..$#array_of_columns) ) {
    my $index = $#array_of_columns - $column;
    my $values = join "", @{ $array_of_columns[$index] };
    if ( $values =~ /^0+$/ ) {
        splice ( @array_of_columns, $index, 1 );
    }
}

say Dumper \@array_of_columns;
__DATA__
1 0 1 0 1
0 0 0 0 0
1 1 1 0 1
0 1 1 0 1
1 1 0 0 0
0 0 0 0 0
0 0 1 0 1
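Note that the result is column-major: each element of @array_of_columns is a reference to one surviving column, so Dumper shows the transpose of the filtered matrix rather than its rows.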
#!/usr/bin/env perl

use strict;
use warnings;

filter_zeros(\*DATA);

sub filter_zeros {
    my $fh = shift;
    my $pos = tell $fh;

    my %nonzero_cols;
    my %zero_rows;

    while (my $line = <$fh>) {
        last unless $line =~ /\S/;
        my @row = split ' ', $line;
        my @nonzero_idx = grep $row[$_], 0 .. $#row;
        unless (@nonzero_idx) {
            $zero_rows{$.} = undef;
            next;
        }
        $nonzero_cols{$_} = undef for @nonzero_idx;
    }

    my @matrix;

    {
        my @idx = sort {$a <=> $b } keys %nonzero_cols;
        seek $fh, $pos, 0;
        local $. = 0;

        while (my $line = <$fh>) {
            last unless $line =~ /\S/;
            next if exists $zero_rows{$.};
            print join(' ', (split ' ', $line)[@idx]), "\n";
        }
    }
}

__DATA__
1 0 1 0 1
0 0 0 0 0
1 1 1 0 1
0 1 1 0 1
1 1 0 0 0
0 0 0 0 0
0 0 1 0 1
Output:

1 0 1 1
1 1 1 1
0 1 1 1
1 1 0 0
0 0 1 1
perl -nE's/\s+//g;$m|=$v=pack("b*",$_);push@v,$v if$v!~/\000/}{$m=unpack("b*",$m);@m=split//,$m;@m=grep{$m[$_]eq"1"}0..$#m;say"@{[(split//,unpack(q(b*),$_))[@m]]}"for@v'
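For the less golf-inclined, a longhand sketch of the same bit-mask idea (my own expansion, not the original author's; it assumes a whitespace-separated 0/1 matrix like the samples above):

#!/usr/bin/env perl
# Longhand version of the one-liner: pack each row of 0/1 digits into
# a bit vector, OR the vectors together to learn which columns ever
# contain a 1, then print the kept rows sliced down to those columns.
use strict;
use warnings;
use feature 'say';

my $mask = '';   # bitwise OR of every row's bit vector
my @rows;        # packed bit vectors of the non-zero rows

while (<>) {
    s/\s+//g;                        # "1 0 1 0 1" -> "10101"
    next unless length;
    my $bits = pack 'b*', $_;        # 0/1 string -> packed bits
    $mask |= $bits;                  # remember which columns saw a 1
    push @rows, $bits if $bits =~ /[^\0]/;   # keep rows with any set bit
}

# Indices of the columns that are not all-zero.
my $maskbits = unpack 'b*', $mask;
my @keep = grep { substr($maskbits, $_, 1) } 0 .. length($maskbits) - 1;

for my $row (@rows) {
    say "@{[ (split //, unpack 'b*', $row)[@keep] ]}";
}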
#!/usr/bin/gawk -f

BEGIN {
    FS = " "
}

{
    for (c = 1; c <= NF; ++c) {
        v = $c
        map[c, NR] = v
        ctotal[c] += v
        rtotal[NR] += v
    }
    fields[NR] = NF
}

END {
    for (r = 1; r <= NR; ++r) {
        if (rtotal[r]) {
            append = 0
            f = fields[r]
            for (c = 1; c <= f; ++c) {
                if (ctotal[c]) {
                    if (append) {
                        printf " " map[c, r]
                    } else {
                        printf map[c, r]
                        append = 1
                    }
                }
            }
            print ""
        }
    }
}
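Like the single-pass awk above, this buffers everything and prints in END. Saved as, say, filter.awk (my name for it), it runs with gawk -f filter.awk file, or directly thanks to the shebang line.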
#! /usr/bin/env perl
use strict;
use warnings;
use Data::Dumper;

open my $fh, '<', 'file.txt' or die $!;

##open temp file for output
open my $temp, '>', 'temp.txt' or die $!;

##how many fields you have in your data
##you can extend this list if you have more fields
my @fields_to_remove = (0,1,2,3,4);

my $change = $#fields_to_remove;

while (my $line = <$fh>){

    if ($line =~ /1/){

        my @new = split /\s+/, $line;
        my $i = 0;
        for (@new){
            unless ($_ == 0){
                @fields_to_remove = grep(!/$i/, @fields_to_remove);
            }
            $i++;
        }

        foreach my $field (@fields_to_remove){
            $new[$field] = 'x';
        }

        my $new = join ' ', @new;
        $new =~ s/(\s+)?x//g;
        print $temp $new . "\n";

        ##if a new change detected start over
        ## this should repeat for limited time
        ## as the script keeps learning and eventually stop
        if ($#fields_to_remove != $change){
            $change = $#fields_to_remove;
            seek $fh, 0, 0;
            close $temp;
            unlink 'temp.txt';
            open $temp, '>', 'temp.txt';
        }

    } else {
        ##nothing -- removes 0 lines
    }
}

### this is just for showing you which fields has been removed
print Dumper \@fields_to_remove;
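The script writes its result to temp.txt and, each time it discovers a new non-zero column, seeks back to the start of the input and rewrites the temp file, so it converges after a few passes. One caveat: grep(!/$i/, ...) matches the index as a regex, which is fine for the five fields here but would misbehave once field indices reach double digits (e.g. a non-zero column 1 would also drop 10 from the removal list).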
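Finally, a variant of the two-pass awk from earlier in the thread: the first pass is the same, but instead of skipping the all-zero columns it blanks them in place before printing each non-zero row: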
awk '
    NR==1   {for (i=1; i<=NF; i++) if ($i == 0) zerocol[i]=1; next} 
    NR==FNR {for (idx in zerocol) if ($idx) delete zerocol[idx]; next}
    /[^0[:blank:]]/ {for (i=1; i<=NF; i++) if (i in zerocol) $i=""; print}
' file file