
C#: How to get the redirect URL in Html Agility Pack


I want to parse all the URLs from a collection of URLs. I found the following method:

using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Text;
using HtmlAgilityPack;

public static List<string> ParseLinks(string urlToCrawl)
{
    WebClient webClient = new WebClient();

    byte[] data = webClient.DownloadData(urlToCrawl);
    // Note: ASCII decoding will mangle any non-ASCII characters in the page.
    string download = Encoding.ASCII.GetString(data);

    HashSet<string> list = new HashSet<string>();

    var doc = new HtmlDocument();
    doc.LoadHtml(download);
    // SelectNodes returns null (not an empty collection) when nothing matches.
    HtmlNodeCollection nodes = doc.DocumentNode.SelectNodes("//a[@href]");
    if (nodes == null)
        return new List<string>();

    foreach (var n in nodes)
    {
        string href = n.Attributes["href"].Value;
        list.Add(GetAbsoluteUrlString(urlToCrawl, href));
    }
    return list.ToList();
}

static string GetAbsoluteUrlString(string baseUrl, string url)
{
    // Resolve relative hrefs against the page URL; absolute ones pass through.
    var uri = new Uri(url, UriKind.RelativeOrAbsolute);
    if (!uri.IsAbsoluteUri)
        uri = new Uri(new Uri(baseUrl), uri);
    return uri.ToString();
}
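For reference, a minimal call site might look like this (the crawl URL is a placeholder, not from the original post):

    // Hypothetical usage; "https://example.com" is a placeholder URL.
    List<string> links = ParseLinks("https://example.com");
    foreach (string link in links)
        Console.WriteLine(link);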

Everything works fine, but on some sites the links pass through the site itself (they redirect). I have a link: … When I extract links with my method, ParseLinks gives me the wrong URL, like … but what I expect is … because when you open the link above, it redirects there. So how can I get the redirect target page from the current URL? Please suggest a way to solve this.

I did some research with a few keywords and found a solution. The following method solved my problem:

using System;
using System.Net;

public static string GetFinalRedirect(string url)
{
    if (string.IsNullOrWhiteSpace(url))
        return url;

    int maxRedirCount = 8;  // prevent infinite redirect loops
    string newUrl = url;
    do
    {
        HttpWebRequest req = null;
        HttpWebResponse resp = null;
        try
        {
            req = (HttpWebRequest)WebRequest.Create(url);
            // HEAD avoids downloading the body; only the status and headers are needed.
            req.Method = "HEAD";
            req.AllowAutoRedirect = false;
            resp = (HttpWebResponse)req.GetResponse();
            switch (resp.StatusCode)
            {
                case HttpStatusCode.OK:
                    return newUrl;
                case HttpStatusCode.Redirect:
                case HttpStatusCode.MovedPermanently:
                case HttpStatusCode.RedirectKeepVerb:
                case HttpStatusCode.RedirectMethod:
                    newUrl = resp.Headers["Location"];
                    if (newUrl == null)
                        return url;

                    if (newUrl.IndexOf("://", StringComparison.Ordinal) == -1)
                    {
                        // No URL scheme, so it's a relative URL:
                        // resolve it against the current URL.
                        Uri u = new Uri(new Uri(url), newUrl);
                        newUrl = u.ToString();
                    }
                    break;
                default:
                    return newUrl;
            }
            url = newUrl;
        }
        catch (WebException)
        {
            // Return the last known good URL
            return newUrl;
        }
        catch (Exception)
        {
            return null;
        }
        finally
        {
            if (resp != null)
                resp.Close();
        }
    } while (maxRedirCount-- > 0);

    return newUrl;
}
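To combine the two, one option (a sketch, not necessarily how the original author wired it up) is to resolve each href to an absolute URL and then pass it through GetFinalRedirect inside the foreach loop of ParseLinks:

    foreach (var n in nodes)
    {
        string href = n.Attributes["href"].Value;
        string absolute = GetAbsoluteUrlString(urlToCrawl, href);
        // GetFinalRedirect issues a HEAD request per link, so this
        // slows the crawl and hits every target server once.
        list.Add(GetFinalRedirect(absolute));
    }

On newer .NET versions, where HttpWebRequest is marked obsolete, an alternative sketch is to let HttpClient follow the redirect chain itself (AllowAutoRedirect is on by default) and read the final URI back from the response; GetFinalRedirectAsync is an illustrative name, not part of the original answer:

    using System;
    using System.Net.Http;
    using System.Threading.Tasks;

    static async Task<string> GetFinalRedirectAsync(string url)
    {
        // HttpClient follows redirects automatically by default.
        using var client = new HttpClient();
        using var request = new HttpRequestMessage(HttpMethod.Head, url);
        using var response = await client.SendAsync(request);
        // After redirects are followed, RequestMessage.RequestUri holds
        // the URI that was ultimately requested.
        return response.RequestMessage?.RequestUri?.ToString() ?? url;
    }

Note that some servers reject HEAD requests, so either variant may need a GET fallback depending on the sites being crawled.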