Android RSS 解析问题（XML）
这是我的 RSS XML 文件，我想从中提取数据。我已经用 SAX 解析器完成了全部工作，一切正常。但是我解析的是整个 RSS 文件，得到了每个元素的数据；我的问题是：我不需要整个文件的元素数据，只需要 <item> 元素内部的数据。
<!-- Fix: removed the stray unclosed <item> that sat immediately before
     </channel>; it made the sample ill-formed XML, which by itself would
     make any conforming parser (SAX or DOM) throw an error. -->
<rss version="2.0">
<channel>
<title>RSS Feed</title>
<link>http://www.xyz.com</link>
<description>Calendar RSS Feeds</description>
<language>en-us</language>
<ttl>60</ttl>
<item>
<title>
title 1
</title>
<description>description 1</description>
<link>
http://www.xyz.com
</link>
<guid isPermaLink="false">6027@http://www.xyz.com</guid>
</item>
<item>
<title>
title 2
</title>
<description>description 2</description>
<link>
http://www.xyz.com
</link>
<guid isPermaLink="false">5554@http://www.xyz.com</guid>
</item>
</channel>
</rss>
请问有人能建议一下：我如何才能只检索位于 <item> 元素之下的子元素？
我也尝试过下面的代码，但它给了我错误：
/**
 * SAX callback invoked for every opening tag in the document.
 *
 * Fix: the original nested the "item" test INSIDE the "channel" test, i.e.
 * {@code if (localName.equals("channel")) { if (localName.equals("item")) ... }}.
 * The same string can never equal both, so the inner branch was unreachable
 * and {@code itemList} was never created — which is the reported error.
 * The two tags must be tested independently.
 *
 * @param uri        namespace URI (unused here)
 * @param localName  element name without prefix — compared against "item"
 * @param qName      qualified element name (unused here)
 * @param attributes attributes of the element (unused here)
 * @throws SAXException per the ContentHandler contract
 */
public void startElement(String uri, String localName, String qName,
Attributes attributes) throws SAXException {
// Mark that we are inside an element so characters() knows to buffer text.
// NOTE(review): assumes a "current" flag consumed elsewhere — not visible here.
current = true;
// Begin collecting a new item each time an <item> element opens.
if (localName.equals("item"))
{
itemList = new ItemList();
}
}
提前谢谢。

（回答）试试这个，希望对你有所帮助：
// Fetch an RSS feed and extract per-<item> fields with the DOM API.
// NOTE(review): this fragment is truncated in the original post — the
// enclosing method, try/catch and closing braces are not shown.
url = new URL("http://www.abcd.com/rss.xml");//.......
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
// Proceed only on HTTP 200.
if(conn.getResponseCode() == HttpURLConnection.HTTP_OK){
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
Document doc;
// NOTE(review): url.openStream() opens a SECOND connection instead of
// reusing conn (conn.getInputStream()), and the stream is never closed.
doc = db.parse(url.openStream());
doc.getDocumentElement().normalize();
// All <item> elements in the document; arrays are sized to the item count.
NodeList itemLst = doc.getElementsByTagName("item");
Array.Description = new String[itemLst.getLength()];//........
Array.Title = new String[itemLst.getLength()];
Array.image = new String[itemLst.getLength()];
Array.Date = new String[itemLst.getLength()];
for(int i=0; i < itemLst.getLength(); i++){
Node item = itemLst.item(i);
if(item.getNodeType() == Node.ELEMENT_NODE){
Element ielem = (Element)item;
// getElementsByTagName on the <item> element scopes the lookup to its
// descendants — this is what restricts extraction to <item> children.
NodeList title = ielem.getElementsByTagName("title");
NodeList date = ielem.getElementsByTagName("pubDate");
NodeList description = ielem.getElementsByTagName("description");
////.........
// First text child of each tag holds the value; this throws NPE when a
// tag is missing or empty (the sample feed has no <pubDate> at all).
Array.Title[i] = title.item(0).getChildNodes().item(0).getNodeValue();
Array.Description[i] = description.item(0).getChildNodes().item(0).getNodeValue();
Array.Date[i] = date.item(0).getChildNodes().item(0).getNodeValue();
我已经尝试过这个 DocumentBuilder，也从中得到了结果；但我不知道如何在其中使用 StringBuffer，因为我没有在 DocumentBuilder 中获得完整的数据。你能建议我如何用 DocumentBuilder 实现 StringBuffer 吗？谢谢。

（回答）我构建过一个 RSS 阅读器，下面贴出我用于解析的完整文件，请检查：
// Duplicate of the DOM-parsing fragment posted earlier in this thread;
// kept verbatim. NOTE(review): also truncated — no closing braces shown.
url = new URL("http://www.abcd.com/rss.xml");//.......
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
// Proceed only on HTTP 200.
if(conn.getResponseCode() == HttpURLConnection.HTTP_OK){
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
Document doc;
// NOTE(review): opens a second connection via url.openStream() and never
// closes it; conn.getInputStream() would reuse the checked connection.
doc = db.parse(url.openStream());
doc.getDocumentElement().normalize();
// All <item> elements; result arrays sized to the item count.
NodeList itemLst = doc.getElementsByTagName("item");
Array.Description = new String[itemLst.getLength()];//........
Array.Title = new String[itemLst.getLength()];
Array.image = new String[itemLst.getLength()];
Array.Date = new String[itemLst.getLength()];
for(int i=0; i < itemLst.getLength(); i++){
Node item = itemLst.item(i);
if(item.getNodeType() == Node.ELEMENT_NODE){
Element ielem = (Element)item;
// Scoped lookup: only descendants of this <item> are returned.
NodeList title = ielem.getElementsByTagName("title");
NodeList date = ielem.getElementsByTagName("pubDate");
NodeList description = ielem.getElementsByTagName("description");
////.........
// NPE risk when a tag is absent or empty (e.g. missing <pubDate>).
Array.Title[i] = title.item(0).getChildNodes().item(0).getNodeValue();
Array.Description[i] = description.item(0).getChildNodes().item(0).getNodeValue();
Array.Date[i] = date.item(0).getChildNodes().item(0).getNodeValue();
public class headlinesparser {

/**
 * Downloads the RSS feed and fills the {@code Headlines} arrays with the
 * title, description, publication date and (when present in the
 * description markup) the first {@code <img src=...>} URL of every
 * {@code <item>} element.
 *
 * Fixes over the original:
 *  - the response stream is obtained from the already-checked connection
 *    ({@code conn.getInputStream()}) instead of a second connection via
 *    {@code url.openStream()}, and it is closed (try-with-resources);
 *  - missing/empty tags (e.g. no {@code <pubDate>}) yield "" instead of a
 *    NullPointerException;
 *  - an {@code <img>} whose {@code src} value has no closing quote no
 *    longer throws StringIndexOutOfBoundsException from substring(0, -1);
 *  - removed the unused local {@code cleanUp}.
 */
public static void parse(){
URL url;
try {
url = new URL("http://www.abcd.com/taxonomy/term/19/rss.xml");//.......
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
if(conn.getResponseCode() == HttpURLConnection.HTTP_OK){
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
Document doc;
// Reuse the connection whose response code was just verified, and
// make sure the stream is closed even if parsing throws.
try (java.io.InputStream in = conn.getInputStream()) {
doc = db.parse(in);
}
doc.getDocumentElement().normalize();
// Size the result arrays to the number of <item> elements.
NodeList itemLst = doc.getElementsByTagName("item");
Headlines.Description = new String[itemLst.getLength()];//........
Headlines.Title = new String[itemLst.getLength()];
Headlines.image = new String[itemLst.getLength()];
Headlines.Date = new String[itemLst.getLength()];
for(int i=0; i < itemLst.getLength(); i++){
Node item = itemLst.item(i);
if(item.getNodeType() == Node.ELEMENT_NODE){
Element ielem = (Element)item;
// getElementsByTagName on the <item> element restricts the
// lookup to that item's descendants only.
Headlines.Title[i] = firstText(ielem.getElementsByTagName("title"));
Headlines.Date[i] = firstText(ielem.getElementsByTagName("pubDate"));
Headlines.Description[i] = firstText(ielem.getElementsByTagName("description"));
////.........
if (Headlines.Description[i].contains("<img ")){
String img = Headlines.Description[i].substring(Headlines.Description[i].indexOf("<img "));
// Skip past src= plus the opening quote character.
img = img.substring(img.indexOf("src=") + 5);
int indexOf = img.indexOf("'");
if (indexOf==-1){
indexOf = img.indexOf("\"");
}
// Only record the URL when a closing quote was actually found.
if (indexOf != -1){
Headlines.image[i] = img.substring(0, indexOf);
}
}
}
}
}
} catch (MalformedURLException e) {
e.printStackTrace();
} catch (DOMException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} catch (ParserConfigurationException e) {
e.printStackTrace();
} catch (SAXException e) {
e.printStackTrace();
}
}

/**
 * Returns the text of the first element in {@code nodes}, or "" when the
 * list is empty or the element has no text child (null-safe replacement
 * for the original {@code item(0).getChildNodes().item(0).getNodeValue()}).
 */
private static String firstText(NodeList nodes) {
if (nodes == null || nodes.getLength() == 0) {
return "";
}
Node first = nodes.item(0).getChildNodes().item(0);
return first == null ? "" : first.getNodeValue();
}
}